Dataset columns: text (string, 12 to 1.05M chars) | repo_name (string, 5-86) | path (string, 4-191) | language (1 class) | license (15 classes) | size (int32, 12 to 1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars)
#!/usr/bin/env python3
"""
This program computes oligonucleotide profiles (microcomposition) for sequences given as arguments.
Depending on the set of parameters used, the program guesses what can be done and performs accordingly. Use the proper (and minimal) parameter set you need to achieve what you want.
Luckily, the program will tell you what it does, and how you can change its behaviour by providing it with supplementary parameters.
See the help by calling the program without any argument.
dependencies:
Biopython
numpy
cython
as root/admin, if you want the dependencies installed globally:
aptitude/yum install python3-dev python3-setuptools
easy_install -U setuptools
easy_install3 -U setuptools
pip install biopython
pip3 install biopython
pip install cython
pip3 install cython
pip install numpy
pip3 install numpy
"""
__author__ = "Ludovic V. Mallet, PhD"
__copyright__ = ""
__date__ = "2016.03.22"
__licence__ = "GPLv3"
__version__ = "0.1"
__status__ = "alpha"
__email__ = ""
from scoop import futures
import os, re, math, sys, argparse, time
import tempfile
import numpy as np
import multiprocessing, shlex, subprocess, pickle, shutil
from Bio import SeqIO
from Bio.Seq import Seq
from collections import Counter
from itertools import product
from sklearn.externals.joblib import Parallel, delayed
from sklearn.externals.joblib import dump, load
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.utils import gen_even_slices
np.seterr(divide='ignore', invalid='ignore')
#TBF: way too long! (-:
#minimum_number_of_windows_per_fasta_entry_to_use_multiple_cpu_for_this_entry=20
#LM: Fine by me. This very variable makes the implementation much more complicated and triggers the multicore or serial mode depending on the number of windows in a contig, therefore I wanted to be explicit about what it does. But I guess this mere sentence explains it now ^^.
min_nb_w_per_fasta_for_mul_cpu=20
## Distance functions
def posdef_check_value(d):
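# descriptive note: replaces NaN/Inf entries (from log(0) or division by zero) with 0, in place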
d[np.isnan(d)]=0
d[np.isinf(d)]=0
def KL(a, b):
""" compute the KL distance
"""
if a.ndim == 1 and b.ndim == 1:
d = a * np.log(a/b)
posdef_check_value(d)
res = np.sum(d)
elif a.ndim == 2 and b.ndim == 2:
X, Y = check_pairwise_arrays(a, b)
X = X[:,np.newaxis]
d = X * np.log(X/Y)
posdef_check_value(d)
res = np.sum(d, axis=2).T
else:
print("Dimension erro in KL, a={}, b={}".format(a.ndim, b.ndim), file=sys.stderr)
sys.exit(1)
return res
def Eucl(a, b):
""" compute Euclidean distance
"""
d = pow(a-b,2)
posdef_check_value(d)
return np.sqrt(np.sum(d))*1000 # Scaling
def JSD(a, b):
""" Compute JSD distance
"""
if a.ndim == 1 and b.ndim == 1:
h = 0.5 * (a + b)
d = 0.5 * (KL(a, h) + KL(b, h))
elif a.ndim==2 and b.ndim == 1:
h = 0.5 * (a[np.newaxis,:] + b)
d1 = a[np.newaxis,:] * np.log(a[np.newaxis,:]/h)
posdef_check_value(d1)
d1 = np.sum(d1, axis=2)
d2 = b * np.log(b/h)
posdef_check_value(d2)
d2 = np.sum(d2, axis=2)
d = 0.5 * (d1 + d2)
else:
h = 0.5 * (a[np.newaxis,:] + b[:, np.newaxis])
d1 = a[np.newaxis,:] * np.log(a[np.newaxis,:]/h)
posdef_check_value(d1)
d1 = np.sum(d1, axis=2)
d2 = b[:, np.newaxis] * np.log(b[:, np.newaxis]/h)
posdef_check_value(d2)
d2 = np.sum(d2, axis=2)
d = 0.5 * (d1 + d2)
#d = d.T
return d*1000 # Scaling
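# Informal sanity check: JSD(p, p) == 0 for any distribution p, and JSD is symmetric
# in its arguments; like Eucl above, the result is multiplied by 1000 purely as a
# scaling convention.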
def vector_to_matrix(profile):
return list((zip(*(iter(profile),)*int(math.sqrt(len(profile))))))
def read_seq_chunk_pos(sequences, chunksize):
""" read a first chunk of fasta sequences to be processed
Parameters:
-----------
sequences: list
list of nucleotide sequences
chunksize: int
the number of fasta entries to read
Return:
-------
seqchunk: list
a list of SeqRecord
"""
seqchunk = list()
start = 0
for seq in sequences:
seqchunk.append(str(seq))
if len(seqchunk) == chunksize:
yield start, start + chunksize, seqchunk
start += chunksize
seqchunk = list()
# the last chunk
if seqchunk != []:
yield start, start+len(seqchunk), seqchunk
def read_seq_chunk(genome, chunksize, pattern, strand):
""" read a first chunk of fasta sequences to be processed
Parameters:
-----------
sequences: list
list of nucleotide sequences
chunksize: int
the number of fasta entries to read
Return:
-------
seqchunk: list
a list of SeqRecord
"""
seqchunk = list()
pattern=str(pattern)
for record in SeqIO.parse(genome, "fasta"):
seqchunk.append((str(record.seq), pattern, strand))
if len(seqchunk) == chunksize:
yield seqchunk
seqchunk = list()
# the last chunk
if seqchunk != []:
yield seqchunk
def select_strand (seq, strand="both"):
""" choose which strand to work on
Parameters:
-----------
seq: string
a str object containing a nucleotide sequence, convertible into a Bio.Seq object
strand: string
select which strand to use
Return:
-------
seq: string
the sequence strand
"""
Bioseq_record = Seq(seq)
if (strand == "both"):
new_seq = str(str(seq)+str(Bioseq_record.reverse_complement()))
elif (strand == "minus"):
new_seq = str(Bioseq_record.reverse_complement())
elif (strand == "plus"):
new_seq = str(seq)
else:
print("Error, strand parameter of selectd_strand() should be choose from {'both', 'minus', 'plus'}", file=sys.stderr)
sys.exit(1)
return new_seq
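# Example: select_strand("AACG", "minus") returns "CGTT" (the reverse complement),
# while select_strand("AACG", "both") returns "AACGCGTT" (forward strand followed
# by its reverse complement).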
def cut_sequence_and_count_pattern(seq, pattern, strand):
""" cut sequence in spaced-words of size k
Parameters:
-----------
seq: string
The nucleotide sequence
pattern: string
the binary space pattern to extract spaced-words example: 1001010001
ksize is inferred from the number of '1' in the pattern
Return:
-------
count_words: dict
a dictionary storing the number of observations of each spaced-word
"""
seq = select_strand(seq, strand)
# we work on upper case letters
seq = seq.upper()
seq_words = list()
#ksize=pattern.count('1')
pattern=str(pattern)
target_index = [i for i,j in enumerate(pattern) if j=="1"]
# re.split: excludes what is not a known characterised nucleotide
for subseq in re.split('[^ACGT]+', seq):
if (len(subseq) >= len(pattern)):
#get all kword in sub-sequence
seq_words.extend("".join(list(map( lambda x: subseq[i+x], target_index))) for i in range(len(subseq)-(len(pattern)-1)))
count_words = Counter(seq_words)
return count_words
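# Worked example: with pattern "101", positions 0 and 2 of every length-3 window are
# kept, so cut_sequence_and_count_pattern("ACGT", "101", "plus") returns
# Counter({'AG': 1, 'CT': 1}) -- "AG" from window "ACG" and "CT" from window "CGT".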
def count2freq(count_words, ksize):
""" transform raw count into a feature vector of frequencies
Parameters:
-----------
count_words: dict
a dictionary storing the number of observations of each word
ksize: int
the size of the kmer
Return:
-------
features: np.array
a feature vector of the frequency of each word in the read
"""
features = list()
kword_count = sum(count_words.values())
if kword_count > 0:
# iterate over letter product
for letter_word in product(("C","G","A","T"), repeat=ksize):
kword = "".join(letter_word)
if kword in count_words:
features.append(count_words[kword]/kword_count)
else:
features.append(0)
else:
features = [0 for kword in range(4**ksize)]
return np.array(features)
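# Example: count2freq(Counter({'A': 3, 'C': 1}), 1) returns array([0.25, 0., 0.75, 0.]),
# since words are enumerated in the fixed order C, G, A, T (and their products for
# k > 1), giving every profile the same 4**ksize layout.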
def compute_frequency(seq, n_max_freq_in_windows=1.0, pattern="1111", strand="both"):
""" compute kmer frequency, ie feature vector of each read
Parameters:
-----------
seq: string
The nucleotide sequence
pattern: string
the binary space pattern to extract spaced-words example: 1001010001
ksize is inferred from the number of '1' in the pattern
strand: string
which strand to used
Return:
-------
features: np.array
a feature vector of the frequency of each word in the read
"""
pattern=str(pattern)
ksize = pattern.count("1")
if((seq.count('N')/len(seq)) <= float(n_max_freq_in_windows)):
count_words = cut_sequence_and_count_pattern(seq, pattern, strand)
# create feature vector
features = count2freq(count_words, ksize)
else:
features = np.array([np.nan] * 4**ksize) # 4**ksize matches the vector length produced by count2freq
return features
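# Example: compute_frequency("ACGTACGT", pattern="11") returns the 16 dinucleotide
# frequencies computed over the sequence plus its reverse complement (strand defaults
# to "both"); a window whose proportion of N exceeds n_max_freq_in_windows yields a
# NaN-filled vector instead.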
def compute_whole_composition(genome, pattern, strand, nb_jobs=1):
""" for each sequence of the genome, separately compute words numbers,
aggregate results and convert to frequency
"""
pattern=str(pattern)
ksize = pattern.count("1")
fd = delayed(cut_sequence_and_count_pattern)
counts = Parallel(n_jobs=nb_jobs, verbose=0)(fd(str(record.seq), pattern, strand)
for record in SeqIO.parse(genome, "fasta"))
# aggregate
count_words = Counter()
for par_counts in counts:
for word in par_counts:
count_words[word] += par_counts[word]
# create feature vector
frequency = count2freq(count_words, ksize)
return frequency
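# Example usage ("assembly.fasta" is a placeholder path):
# compute_whole_composition("assembly.fasta", "1111", "both", nb_jobs=4) returns one
# 256-long frequency vector aggregated over all fasta entries of the file.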
def compute_distance_joblib(mth_dist, mcp, seq, pattern, strand , n_max_freq_in_windows):
freq = compute_frequency(seq, n_max_freq_in_windows, pattern, strand)
if mth_dist == "JSD":
return JSD(freq, mcp)
else:
return Eucl(freq, mcp)
def compute_distances(mthdrun, large, mth_dist, mcp, sequences, pattern, strand, njobs, n_max_freq_in_windows):
if mthdrun == "joblib":
if large == "None":
fd = delayed(compute_distance_joblib)
res = Parallel(n_jobs=njobs, verbose=0)(fd(mth_dist, mcp, sequences[i], pattern, strand , n_max_freq_in_windows)
for i in range(len(sequences)))
res = np.array(res)
#print(res.shape)
return res
def make_genome_chunk(genome, windows_size, windows_step, options, nbchunk=500):
""" create chunk of genome sequences
"""
chunk_info = list()
chunk_sequences = list()
for record in SeqIO.parse(genome, "fasta"):
seq = str(record.seq)
seq_id = record.id
if len(seq) < windows_size: #only enough to compute one window, no sliding,
chunk_info.append([seq_id, 0, int(len(seq))])
chunk_sequences.append(seq)
if len(chunk_info) == nbchunk:
yield chunk_info, chunk_sequences
chunk_info, chunk_sequences = list(), list()
elif len(seq) < min_nb_w_per_fasta_for_mul_cpu*windows_step:
#not many windows in this contig, so better launching it in serial rather than in parallel
for s in range(0, len(seq)-windows_size, windows_step):
if s==0:
# To avoid border effects in the code, we use only the simple formula
# start + windows_size/2 (+/-) windows_step/2 to find the significant center part of the window.
# When several windows overlap, this centered part (as long as the window step) is the part
# most representative of the window, unlike the flanking regions that other windows overlap.
# BUT: this simple formula has border effects of its own, so we manually correct the start of
# the first window and the stop of the last window to match the contig borders.
displayed_start=1
else:
displayed_start=int(s+windows_size/2-windows_step/2)
if s==len(seq)-windows_size:
displayed_stop=len(seq)
else:
displayed_stop=int(s+windows_size/2+windows_step/2)
window=seq[s:s+windows_size]
chunk_info.append([seq_id, displayed_start, displayed_stop])
chunk_sequences.append(window)
if len(chunk_info) == nbchunk:
yield chunk_info, chunk_sequences
chunk_info, chunk_sequences = list(), list()
else:
for s in range(0,len(seq)-windows_size,windows_step):
start, stop = int(s+windows_size/2-windows_step/2),int(s+windows_size/2+windows_step/2)
if start == (windows_size/2-windows_step/2):
displayed_start=1
else:
displayed_start=start
if stop-windows_step/2+windows_size/2 >= len(seq)-windows_step and stop-windows_step/2+windows_size/2 <= len(seq):
displayed_stop = len(seq)
else:
displayed_stop = stop
chunk_info.append([seq_id, displayed_start, displayed_stop])
chunk_sequences.append(seq[s:s+windows_size])
if len(chunk_info) == nbchunk:
yield chunk_info, chunk_sequences
chunk_info, chunk_sequences = list(), list()
# last chunk to return
if len(chunk_info) != 0:
yield chunk_info, chunk_sequences
def sliding_windows_distances(genome, mcp_comparison, mth_dist="JSD", pattern="1111", windows_size=5000, windows_step=500, options=None):
""" Stuff
Parameters:
===========
seq: Seq object
the processed sequence
seq_id: string
the id of the processed sequence
mcp_comparison: np.array
the kmer frequencies of the query (host or genome)
position: bool
???
mth_dist: string
the distance method
windows_size: int
the size of the sequence to process
windows_step: int
the number of nucleotide to slid the window
pattern: string
kmer pattern with gap
"""
cnt = 0
t1_tot = time.time()
for chunk_info, sequences in make_genome_chunk(genome, windows_size, windows_step, options, 50000):
res = list()
if mth_dist == "JSD":
vec_dist = compute_distances("joblib", "None", mth_dist, mcp_comparison,
sequences, pattern, options.strand,
options.threads_max, options.n_max_freq_in_windows)
for i in range(vec_dist.shape[0]):
res.append(chunk_info[i]+[vec_dist[i]])
else:
# compute each window's frequency vector in parallel, then all Euclidean
# distances to the reference composition at once
fd = delayed(compute_frequency)
freq = Parallel(n_jobs=options.threads_max, verbose=0)(fd(seq, options.n_max_freq_in_windows, pattern, options.strand) for seq in sequences)
vec_dist = pairwise_distances(np.array(freq), mcp_comparison.reshape(1, -1), n_jobs=options.threads_max).ravel()
#print(np.allclose(test_dist, vec_dist))
#print(vec_dist.shape)
for i in range(vec_dist.shape[0]):
res.append(chunk_info[i]+[vec_dist[i]])
yield res
#t3 = time.time()
#print(cnt, t2-t1, t3-t2)
#cnt += 1
#t2_tot = time.time()
#print("Done in", t2_tot - t1_tot)
def concate_sequence(inputfile, file_format="fasta"):
""" agglomerate all sequences in memory in a single Seq object
Parammeters:
============
inputfile: string
path to the sequence file
file_format: string
format of the sequence file (see BioPython SeqIO.parse for a list of available format
Return:
=======
whole_seq: string
a unique string of sequence separated by N, k-mer with N are not taken into account (boundaries of sequences)
"""
seq = []
for record in SeqIO.parse(inputfile, file_format):
seq.append(str(record.seq))
# N is not interpreted to compute frequencies, so by putting one between two sequences,
# we avoid to account for the chimeric words that would be created by juxtaposing the 2 sequences.
whole_seq = "N".join(seq)
return whole_seq
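# Example: a file holding the entries "ACGT" and "GGCC" yields "ACGTNGGCC"; since
# k-mers containing N are skipped, no chimeric word spanning the two entries is counted.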
def get_cmd():
""" read command line arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--assembly", action="store", required=True,
dest="genome", help="multifasta of the genome assembly")
parser.add_argument("-c", "--conta", action="store", dest="conta",
help="multifasta of the contaminant species training set")
parser.add_argument("-r", "--host", action="store", dest="host",
help="optional host species training set in multifasta")
parser.add_argument("-n", "--n_max_freq_in_windows", action="store",
type=float, dest="n_max_freq_in_windows", default = 0.4,
help = "maximum proportion of N tolerated in a window to compute the microcomposition anyway [0~1]. "
"Too much 'N's will reduce the number of kmer counts and will artificially coarse the frequency resolutions")
#"If your assembly contains many stretches of 'N's, "
#"consider rising this parameter and shortening the windows"
#" step in order to allow computation and signal in the "
#"output, it might cost you computational time though. "
#"Windows which do not meet this criteria will be affected "
#"a distance of 'nan'")
parser.add_argument("-k", "--lgMot", action="store", dest="k", type=int, default=4,
help="word wise/ kmer lenght/ k [default:%(default)d]")
parser.add_argument("-p", "--pattern", action="store", dest="pattern",
help="pattern to use for frequency computation")
parser.add_argument("-w", "--windows_size", action="store", dest="windows_size", type=int, default=5000,
help="Sliding windows size (bp)")
parser.add_argument("-t", "--windows_step", action="store", dest="windows_step", type=int, default=500,
help="Sliding windows step size(bp)")
parser.add_argument("-d", "--distance", action="store", dest="dist", choices=["JSD", "Eucl"],
default="JSD", help="distance method between two signatures: "
"Eucl : Euclidienne, JSD : Jensen-Shannon divergence [default:%(default)s]")
parser.add_argument("-s", "--strand", action="store", default="both", choices=["both", "plus", "minus"],
help="strand used to compute microcomposition. [default:%(default)s]")
parser.add_argument("-u", "--cpu", action="store", dest="threads_max", type=int, default=4,
help="how maany threads to use for windows microcomposition computation[default:%(default)d]")
parser.add_argument("-W", "--workdir", action="store", dest="workdir", default=".", help="working directory")
options = parser.parse_args()
return options
def main():
# get parameters
options = get_cmd()
print("Genome : {}".format(options.genome))
base_genome = os.path.basename(options.genome)
# preparing output file
if not os.path.isdir(options.workdir):
os.makedirs(options.workdir)
# read target sequence (host or genome)
if (not options.conta):
#no contaminant, genome is the target
target = options.genome
print("Contaminant : {}".format(None))
output = os.path.join(options.workdir, base_genome + ".mcp_windows_vs_whole_" + options.dist+".dist")
else:
base_conta = os.path.basename(options.conta)
print("Contaminant : {} ".format(options.conta))
output = base_genome+".mcp_hostwindows_vs_"
if options.host:
base_host = os.path.basename(options.host)
# the host is target
print("Host : {}".format(options.host))
target = options.host
output = os.path.join(options.workdir, output+"host_"+base_host+"_"+options.dist+".dist")
else:
#contaminant is provided but no host, genome is the target
print("Host : None, using whole genome".format(options.host))
output = os.path.join(options.workdir, output+"wholegenome_"+options.dist+".dist")
target = options.genome
if not options.pattern:
# argparse always stores -p as a string, so the old int check was dead code;
# with no explicit pattern, fall back to an ungapped kmer of length k (-k defaults to 4)
options.pattern = "1" * options.k
# one vector shape of ksize**4
genome = compute_whole_composition(options.genome, options.pattern, options.strand, nb_jobs=options.threads_max)
if (not options.conta):
if (not options.windows_size and not options.windows_step):
print("Warning, no sliding window parameters (-w and -t )\n"
"The signature will be computed from the whole genome\n"
"Computing signature from the whole genome", file = sys.stderr)
output = os.path.join(options.workdir, base_genome+".microcomposition.mat")
with open(output, 'w') as outf:
outf.write(str(vector_to_matrix(genome)))
sys.exit(0)
elif (options.windows_size or options.windows_step):
print("Computing microcomposition signaure and distances to genome")
else:
# one vector shape of ksize**4
conta = compute_whole_composition(options.conta, options.pattern, options.strand, nb_jobs=options.threads_max)
with open(output, 'w') as outf:
for res in sliding_windows_distances(options.genome, mcp_comparison=genome, mth_dist=options.dist, pattern=options.pattern,
windows_size=options.windows_size, windows_step=options.windows_step, options=options):
for t in res:
outf.write(str("\t".join(map(str,t)))+"\n")
if options.conta:
output = os.path.join(options.workdir, base_genome+".mcp_hostwindows_vs_"+"conta_"+base_conta+"_"+options.dist+".dist")
with open(output, 'w') as outf:
for res in sliding_windows_distances(options.genome, mcp_comparison=conta, mth_dist=options.dist, pattern=options.pattern,
windows_size=options.windows_size, windows_step=options.windows_step, options=options):
for t in res:
outf.write(str("\t".join(map(str,t)))+"\n")
sys.exit(0)
if __name__ == "__main__":
main()
--- repo_name: itsmeludo/ContaLocate | path: Kount.py | language: Python | license: gpl-3.0 | size: 23261 | keyword: ["Biopython"] | text_hash: 4e89a51ef31b35aac90ecb3f2bbf574fa0e0fd0f7ca95b8d1c9c2506c520ebfb ---
import random
import math
import numpy as np
import scipy.interpolate as si
class Path:
"""
Represents a state of system in the lagrangian space (path
configurations X constraint).
"""
_maxVlambdaPert = 100.
_maxVertexPert = 0.01
_initialVlambda = 0.
_changeVlambdaProbability = 0.05
_numPointsSplineMultiplier = 10
_numSigmaGauss = 9
def __init__(self, initialVertexes, scene, optimizeVal):
"""
optimizeVal can be: 'length', 'meanAngle', 'maxAngle', 'meanCurvature', 'maxCurvature', 'maxCurvatureLength', 'maxDerivative2'
"""
self._vertexes = initialVertexes
self._scene = scene
self._optimizeVal = optimizeVal
self._dimR = self._vertexes.shape[0]
self._dimC = self._vertexes.shape[1]
self._numPointsSpline = self._numPointsSplineMultiplier * self._dimR
self._spline, splineD1, splineD2 = self._splinePoints(self._vertexes)
self._vlambda = self._initialVlambda
self._initialLength = self._calculateTotalLength(self._vertexes)
(self._currentEnergy, self._currentLength, self._currentMeanAngle, self._currentMaxAngle,
self._currentMeanCurvature, self._currentMaxCurvature, self._currentMaxCurvatureLength,
self._currentMaxDerivative2, self._currentConstraints) = self._initializePathEnergy(
self._vertexes, self._spline, splineD1, splineD2, self._vlambda)
@property
def vertexes(self):
return self._vertexes
@property
def spline(self):
return self._spline
@property
def energy(self):
return self._currentEnergy
@property
def length(self):
return self._currentLength
@property
def meanAngle(self):
return self._currentMeanAngle
@property
def maxAngle(self):
return self._currentMaxAngle
@property
def meanCurvature(self):
return self._currentMeanCurvature
@property
def maxCurvature(self):
return self._currentMaxCurvature
@property
def maxCurvatureLength(self):
return self._currentMaxCurvatureLength
@property
def maxDerivative2(self):
return self._currentMaxDerivative2
@property
def constraints(self):
return self._currentConstraints
@property
def vlambda(self):
return self._vlambda
@property
def optimizeVal(self):
return self._optimizeVal
def tryMove(self, temperature, neighbourMode):
"""
Move the path or the lambda multiplier to a neighbouring state,
with a certain acceptance probability.
Pick a random vertex (except the extremes), and move
it in a random direction (with a maximum perturbation).
Use a lagrangian relaxation because we need to evaluate
min(measure(path)) given the constraint that all quadrilaterals
formed by 4 consecutive points in the path must be collision
free; where measure(path) is, depending on the chosen method,
the length of the path or the mean
of the supplementary angles of each pair of edges of the path.
If neighbourMode=0 then move the node uniformly; if
neighbourMode=1 then move the node with gaussian probabilities
with mean in the perpendicular direction with respect to the
previous-next nodes axis.
"""
moveVlambda = random.random() < self._changeVlambdaProbability
if moveVlambda:
newVlambda = self._vlambda
newVlambda = newVlambda + (random.uniform(-1.,1.) * self._maxVlambdaPert)
newEnergy = self._calculatePathEnergyLambda(newVlambda)
# attention: the acceptance test is inverted with respect to the vertex move below;
# the lambda multiplier is maximised (dual ascent) while the path measure is minimised
if (newEnergy > self._currentEnergy) or (math.exp(-(self._currentEnergy-newEnergy)/temperature) >= random.random()):
self._vlambda = newVlambda
self._currentEnergy = newEnergy
else:
newVertexes = np.copy(self._vertexes)
movedV = random.randint(1,self._dimR - 2) #don't change extremes
if(neighbourMode == 0):
moveC = random.randint(0,self._dimC - 1)
newVertexes[movedV][moveC] = newVertexes[movedV][moveC] + (random.uniform(-1.,1.) * self._maxVertexPert)
else:
a = self._vertexes[movedV-1] - self._vertexes[movedV+1]
b = np.array([1,0])
alfa = math.acos(np.dot(a,b)/(np.linalg.norm(a)*np.linalg.norm(b))) - (math.pi/2)
randAng = self._truncGauss(math.pi/2, math.pi/(2*self._numSigmaGauss), 0, math.pi)
if random.randint(0,self._dimC - 1) == 1:
randAng = randAng + math.pi
randAng = randAng + alfa
randDist = random.uniform(-1.,1.) * self._maxVertexPert
newVertexes[movedV] = self._vertexes[movedV] + np.array([randDist * math.cos(randAng), randDist * math.sin(randAng)])
# newVertexes[movedV][0] = newVertex[0]
# newVertexes[movedV][1] = newVertex[1]
newSpline,newEnergy,newLength,newMeanAngle,newMaxAngle,newMeanCurvature,newMaxCurvature,newMaxCurvatureLength,newMaxDerivative2,newConstraints = self._calculatePathEnergyVertex(newVertexes, movedV)
# attention: the acceptance test is inverted with respect to the lambda move above (minimisation here)
if (newEnergy < self._currentEnergy) or (math.exp(-(newEnergy-self._currentEnergy)/temperature) >= random.random()):
self._vertexes = newVertexes
self._spline = newSpline
self._currentEnergy = newEnergy
self._currentLength = newLength
self._currentMeanAngle = newMeanAngle
self._currentMaxAngle = newMaxAngle
self._currentMeanCurvature = newMeanCurvature
self._currentMaxCurvature = newMaxCurvature
self._currentMaxCurvatureLength = newMaxCurvatureLength
self._currentMaxDerivative2 = newMaxDerivative2
self._currentConstraints = newConstraints
def _truncGauss(self, mu, sigma, bottom, top):
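# rejection sampling: redraw from the Gaussian until the value falls inside
# [bottom, top]; cheap here because tryMove's bounds lie _numSigmaGauss sigmas
# away from the mean, so rejections are rare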
v = random.gauss(mu,sigma)
while not (bottom <= v <= top):
v = random.gauss(mu,sigma)
return v
def _initializePathEnergy(self, vertexes, spline, splineD1, splineD2, vlambda):
length = self._calculateTotalLength(vertexes)
meanAngle = 0.
maxAngle = 0.
for i in range(1, self._dimR - 1): #from 1 to dimR-2
currAngle = self._calculateAngle(vertexes[i-1], vertexes[i], vertexes[i+1])
meanAngle = meanAngle + currAngle
if currAngle > maxAngle:
maxAngle = currAngle
meanAngle = meanAngle / (self._dimR - 2)
meanCurvature = self._calculateMeanCurvature(spline, splineD1, splineD2)
maxCurvature = self._calculateMaxCurvature(spline, splineD1, splineD2)
maxCurvatureLength = self._calculateMaxCurvatureLength(spline, splineD1, splineD2, vertexes)
maxDerivative2 = self._calculateMaxDerivative2(splineD2)
constraints = self._calculateConstraints(spline)
if self._optimizeVal == 'length':
energy = length + vlambda * constraints
elif self._optimizeVal == 'meanAngle':
energy = meanAngle + vlambda * constraints
elif self._optimizeVal == 'maxAngle':
energy = maxAngle + vlambda * constraints
elif self._optimizeVal == 'meanCurvature':
energy = meanCurvature + vlambda * constraints
elif self._optimizeVal == 'maxCurvature':
energy = maxCurvature + vlambda * constraints
elif self._optimizeVal == 'maxCurvatureLength':
energy = maxCurvatureLength + vlambda * constraints
elif self._optimizeVal == 'maxDerivative2':
energy = maxDerivative2 + vlambda * constraints
return (energy, length, meanAngle, maxAngle, meanCurvature, maxCurvature, maxCurvatureLength, maxDerivative2, constraints)
def _calculatePathEnergyLambda(self, vlambda):
"""
calculate the energy when lambda is moved.
"""
return (self._currentEnergy - (self._vlambda * self._currentConstraints) + (vlambda * self._currentConstraints))
def _calculatePathEnergyVertex(self, vertexes, movedV):
"""
calculate the energy when a vertex is moved and return it.
"""
spline, splineD1, splineD2 = self._splinePoints(vertexes)
constraints = self._calculateConstraints(spline)
length = 0.
meanAngle = 0.
maxAngle = 0.
meanCurvature = 0.
maxCurvature = 0.
maxCurvatureLength = 0.
maxDerivative2 = 0.
if self._optimizeVal == 'length':
length = self._calculateTotalLengthSimp(vertexes, movedV)
energy = length + self._vlambda * constraints
elif self._optimizeVal == 'meanAngle':
meanAngle = self._calculateMeanAngle(vertexes, movedV)
energy = meanAngle + self._vlambda * constraints
elif self._optimizeVal == 'maxAngle':
maxAngle = self._calculateMaxAngle(vertexes, movedV)
energy = maxAngle + self._vlambda * constraints
elif self._optimizeVal == 'meanCurvature':
meanCurvature = self._calculateMeanCurvature(spline, splineD1, splineD2)
energy = meanCurvature + self._vlambda * constraints
elif self._optimizeVal == 'maxCurvature':
maxCurvature = self._calculateMaxCurvature(spline, splineD1, splineD2)
energy = maxCurvature + self._vlambda * constraints
elif self._optimizeVal == 'maxCurvatureLength':
maxCurvatureLength = self._calculateMaxCurvatureLength(spline, splineD1, splineD2, vertexes)
energy = maxCurvatureLength + self._vlambda * constraints
elif self._optimizeVal == 'maxDerivative2':
maxDerivative2 = self._calculateMaxDerivative2(splineD2)
energy = maxDerivative2 + self._vlambda * constraints
return (spline, energy, length, meanAngle, maxAngle, meanCurvature, maxCurvature, maxCurvatureLength, maxDerivative2, constraints)
def _calculateTotalLength(self, vertexes):
length = 0.
for i in range(1, self._dimR):
length = length + np.linalg.norm(np.subtract(vertexes[i], vertexes[i-1]))
return length
def _calculateTotalLengthSimp(self, vertexes, movedV):
length = self._currentLength
length = length - self._calculateLength(self._vertexes[movedV], self._vertexes[movedV-1]) + self._calculateLength(vertexes[movedV], vertexes[movedV-1])
length = length - self._calculateLength(self._vertexes[movedV+1], self._vertexes[movedV]) + self._calculateLength(vertexes[movedV+1], vertexes[movedV])
return length
def _calculateMeanAngle(self, vertexes, movedV):
meanAngle = self._currentMeanAngle
if movedV >= 2:
meanAngle = meanAngle + (self._calculateAngle(vertexes[movedV-2], vertexes[movedV-1], vertexes[movedV]) - self._calculateAngle(self._vertexes[movedV-2], self._vertexes[movedV-1], self._vertexes[movedV])) / (self._dimR - 2)
meanAngle = meanAngle + (self._calculateAngle(vertexes[movedV-1], vertexes[movedV], vertexes[movedV+1]) - self._calculateAngle(self._vertexes[movedV-1], self._vertexes[movedV], self._vertexes[movedV+1])) / (self._dimR - 2)
if movedV < self._dimR-2:
meanAngle = meanAngle + (self._calculateAngle(vertexes[movedV], vertexes[movedV+1], vertexes[movedV+2]) - self._calculateAngle(self._vertexes[movedV], self._vertexes[movedV+1], self._vertexes[movedV+2])) / (self._dimR - 2)
return meanAngle
def _calculateMaxAngle(self, vertexes, movedV):
maxAngle = 0.
for i in range(1, self._dimR - 1): #from 1 to dimR-2
currAngle = self._calculateAngle(vertexes[i-1], vertexes[i], vertexes[i+1])
if currAngle > maxAngle:
maxAngle = currAngle
return maxAngle
def _calculateMeanCurvature(self, spline, splineD1, splineD2):
meanCurvature = 0.
for i in range(0, len(spline)):
d1Xd2 = np.cross(splineD1[i], splineD2[i])
Nd1Xd2 = np.linalg.norm(d1Xd2)
Nd1 = np.linalg.norm(splineD1[i])
currCurv = Nd1Xd2 / math.pow(Nd1,3)
meanCurvature += currCurv
meanCurvature = meanCurvature / len(spline)
return meanCurvature
def _calculateMaxCurvature(self, spline, splineD1, splineD2):
maxCurvature = 0.
for i in range(0, len(spline)):
d1Xd2 = np.cross(splineD1[i], splineD2[i])
Nd1Xd2 = np.linalg.norm(d1Xd2)
Nd1 = np.linalg.norm(splineD1[i])
currCurv = Nd1Xd2 / math.pow(Nd1,3)
if currCurv > maxCurvature:
maxCurvature = currCurv
return maxCurvature
def _calculateMaxCurvatureLength(self, spline, splineD1, splineD2, vertexes):
length = self._calculateTotalLength(vertexes)
normLength = length/self._initialLength * 100 # makes the ratio independent of the initial length
maxCurvature = 0.
for i in range(0, len(spline)):
d1Xd2 = np.cross(splineD1[i], splineD2[i])
Nd1Xd2 = np.linalg.norm(d1Xd2)
Nd1 = np.linalg.norm(splineD1[i])
currCurv = Nd1Xd2 / math.pow(Nd1,3)
if currCurv > maxCurvature:
maxCurvature = currCurv
ratioCurvLen = 0.1 #0: all length; 1: all maxCurvature
return ratioCurvLen*maxCurvature + (1-ratioCurvLen)*normLength
def _calculateMaxDerivative2(self, splineD2):
maxDerivative2 = 0.
for i in range(0, len(splineD2)):
currDer2 = np.linalg.norm(splineD2[i])
if currDer2 > maxDerivative2:
maxDerivative2 = currDer2
return maxDerivative2
def _calculateConstraints(self, spline):
"""
calculate the constraints function: the fraction of the computed
spline's points that lie inside obstacles, relative to the total
number of points of the spline.
"""
pointsInside = 0
for p in spline:
if self._scene.isInside(p):
pointsInside = pointsInside + 1
constraints = pointsInside / self._numPointsSpline
return constraints
def _splinePoints(self, vertexes):
degree=4
x = vertexes[:,0]
y = vertexes[:,1]
t = np.linspace(0, 1, len(vertexes) - degree + 1, endpoint=True)
t = np.append([0]*degree, t)
t = np.append(t, [1]*degree)
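# clamped uniform knot vector: repeating the end knots (degree+1) times makes the
# spline interpolate the first and last vertexes, and len(t) equals
# len(vertexes) + degree + 1, as si.splev expects for this many control points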
tck = [t,[x,y], degree]
u=np.linspace(0,1,self._numPointsSpline,endpoint=True)
out = si.splev(u, tck)
outD1 = si.splev(u, tck, 1)
outD2 = si.splev(u, tck, 2)
spline = np.stack(out).T
splineD1 = np.stack(outD1).T
splineD2 = np.stack(outD2).T
return (spline, splineD1, splineD2)
def _calculateLength(self, a, b):
return np.linalg.norm(np.subtract(a, b))
def _calculateAngle(self, a, b, c):
#return 1. + (np.dot(np.subtract(a,b), np.subtract(c,b)) / (np.linalg.norm(np.subtract(a,b)) * np.linalg.norm(np.subtract(c,b))))
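# The value is 1 + cos(angle at b between the rays b->a and b->c): 0 when a, b, c
# are collinear (straight path), up to 2 for a full U-turn, i.e. the supplementary-
# angle penalty described in the tryMove docstring.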
return 1. + (np.dot(np.subtract(b,a), np.subtract(b,c)) / (np.linalg.norm(np.subtract(b,a)) * np.linalg.norm(np.subtract(b,c))))
def plot(self, plotter, plotStartEnd=True, plotInnerVertexes=False, plotEdges=True, plotSpline=True):
if plotEdges:
plotter.plot(self._vertexes[:,0], self._vertexes[:,1], 'r--')
if plotStartEnd:
plotter.plot(self._vertexes[0,0], self._vertexes[0,1], 'ro')
plotter.plot(self._vertexes[-1,0], self._vertexes[-1,1], 'ro')
if plotInnerVertexes:
plotter.plot(self._vertexes[1:-1,0], self._vertexes[1:-1,1], 'ro')
if plotSpline:
plotter.plot(self._spline[:,0], self._spline[:,1], 'r', lw=2)
--- repo_name: trianam/dissertation | path: plotters/voronoiPath/path.py | language: Python | license: cc0-1.0 | size: 16259 | keyword: ["Gaussian"] | text_hash: 68034feafff1729e3bc8cf8141bc31144d817b74434a4298ae6e5b637aa8b77a ---
import dataclasses
import typing
from random import Random
from typing import Dict, Iterator
import randovania
from randovania.game_description import default_database
from randovania.game_description.assignment import NodeConfigurationAssignment, PickupTarget
from randovania.game_description.default_database import default_prime2_memo_data
from randovania.game_description.game_description import GameDescription
from randovania.game_description.game_patches import GamePatches, ElevatorConnection
from randovania.game_description.requirements import Requirement, RequirementAnd, ResourceRequirement
from randovania.game_description.resources.item_resource_info import ItemResourceInfo
from randovania.game_description.resources.pickup_entry import PickupModel
from randovania.game_description.resources.resource_database import ResourceDatabase
from randovania.game_description.resources.resource_info import CurrentResources, ResourceGain
from randovania.game_description.resources.resource_type import ResourceType
from randovania.game_description.world.area_identifier import AreaIdentifier
from randovania.game_description.world.node import TeleporterNode
from randovania.game_description.world.node_identifier import NodeIdentifier
from randovania.game_description.world.world_list import WorldList
from randovania.games.game import RandovaniaGame
from randovania.games.prime2.layout.echoes_configuration import EchoesConfiguration
from randovania.games.prime2.layout.echoes_cosmetic_patches import EchoesCosmeticPatches
from randovania.games.prime2.layout.hint_configuration import HintConfiguration, SkyTempleKeyHintMode
from randovania.games.prime2.patcher.echoes_dol_patcher import EchoesDolPatchesData
from randovania.generator.item_pool import pickup_creator
from randovania.interface_common.players_configuration import PlayersConfiguration
from randovania.layout.base.base_configuration import BaseConfiguration
from randovania.layout.layout_description import LayoutDescription
from randovania.layout.lib.teleporters import TeleporterShuffleMode
from randovania.patching.prime import elevators
from randovania.patching.prime.patcher_file_lib import (
sky_temple_key_hint, item_names, pickup_exporter, hints, credits_spoiler, hint_lib,
)
_EASTER_EGG_RUN_VALIDATED_CHANCE = 1024
_EASTER_EGG_SHINY_MISSILE = 8192
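# Both constants are used below as the inclusive upper bound of rng.randint(0, N),
# so the trigger chance is 1/(N+1): roughly 1/1024 and 1/8192 respectively.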
_ENERGY_CONTROLLER_MAP_ASSET_IDS = [
618058071, # Agon EC
724159530, # Torvus EC
988679813, # Sanc EC
]
_ELEVATOR_ROOMS_MAP_ASSET_IDS = [
# 0x529F0152, # Sky Temple Energy Controller
0xAE06A5D9, # Sky Temple Gateway
# cliff
0x1C7CBD3E, # agon
0x92A2ADA3, # Torvus
0xFB9E9C00, # Entrance
0x74EFFB3C, # Aerie
0x932CB12E, # Aerie Transport Station
# sand
0xEF5EA06C, # Sanc
0x8E9B3B3F, # Torvus
0x7E1BC16F, # Entrance
# swamp
0x46B0EECF, # Entrance
0xE6B06473, # Agon
0x96DB1F15, # Sanc
# tg -> areas
0x4B2A6FD3, # Agon
0x85E70805, # Torvus
0xE4229356, # Sanc
# tg -> gt
0x79EFFD7D,
0x65168477,
0x84388E13,
# gt -> tg
0xA6D44A39,
0x318EBBCD,
0xB1B5308D,
]
def item_id_for_item_resource(resource: ItemResourceInfo) -> int:
return resource.extra["item_id"]
def _area_identifier_to_json(world_list: WorldList, identifier: AreaIdentifier) -> dict:
world = world_list.world_by_area_location(identifier)
area = world.area_by_identifier(identifier)
return {
"world_asset_id": world.extra['asset_id'],
"area_asset_id": area.extra['asset_id'],
}
def _create_spawn_point_field(patches: GamePatches,
game: GameDescription,
) -> dict:
capacities = [
{
"index": item_id_for_item_resource(item),
"amount": patches.starting_items.get(item, 0),
}
for item in game.resource_database.item
]
return {
"location": _area_identifier_to_json(game.world_list, patches.starting_location),
"amount": capacities,
"capacity": capacities,
}
def _pretty_name_for_elevator(game: RandovaniaGame,
world_list: WorldList,
original_teleporter_node: TeleporterNode,
connection: AreaIdentifier,
) -> str:
"""
Calculates the name that the room containing this elevator should have
:param game:
:param world_list:
:param original_teleporter_node:
:param connection:
:return:
"""
if original_teleporter_node.keep_name_when_vanilla:
if original_teleporter_node.default_connection == connection:
return world_list.nodes_to_area(original_teleporter_node).name
return "Transport to {}".format(elevators.get_elevator_or_area_name(game, world_list, connection, False))
def _create_elevators_field(patches: GamePatches, game: GameDescription) -> list:
"""
Creates the elevator entries in the patcher file
:param patches:
:param game:
:return:
"""
world_list = game.world_list
elevator_connection = patches.elevator_connection
nodes_by_teleporter = _get_nodes_by_teleporter_id(world_list)
if len(elevator_connection) != len(nodes_by_teleporter):
raise ValueError("Invalid elevator count. Expected {}, got {}.".format(
len(nodes_by_teleporter), len(elevator_connection)
))
elevator_fields = []
for teleporter, connection in elevator_connection.items():
node = world_list.node_by_identifier(teleporter)
assert isinstance(node, TeleporterNode)
elevator_fields.append({
"instance_id": node.extra["teleporter_instance_id"],
"origin_location": _area_identifier_to_json(game.world_list, teleporter.area_location),
"target_location": _area_identifier_to_json(game.world_list, connection),
"room_name": _pretty_name_for_elevator(game.game, world_list, node, connection)
})
return elevator_fields
def _get_nodes_by_teleporter_id(world_list: WorldList) -> Dict[NodeIdentifier, TeleporterNode]:
return {
world_list.identifier_for_node(node): node
for node in world_list.all_nodes
if isinstance(node, TeleporterNode) and node.editable
}
def translator_index_for_requirement(requirement: Requirement) -> int:
assert isinstance(requirement, RequirementAnd)
assert 1 <= len(requirement.items) <= 2
items: set = set()
for req in requirement.items:
assert isinstance(req, ResourceRequirement)
assert req.amount == 1
assert not req.negate
assert isinstance(req.resource, ItemResourceInfo)
items.add(item_id_for_item_resource(req.resource))
# Remove Scan Visor, as it should always be present
items.remove(9)
for it in items:
return it
# If nothing is present, then return Scan Visor as "free"
return 9
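# Example: a gate requiring (Scan Visor AND Violet Translator) yields the Violet
# Translator's item_id, while a gate requiring only Scan Visor falls through to 9,
# i.e. the gate is treated as free.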
def _create_translator_gates_field(game: GameDescription, gate_assignment: NodeConfigurationAssignment) -> list:
"""
Creates the translator gate entries in the patcher file
:param gate_assignment:
:return:
"""
return [
{
"gate_index": game.world_list.node_by_identifier(identifier).extra["gate_index"],
"translator_index": translator_index_for_requirement(requirement),
}
for identifier, requirement in gate_assignment.items()
]
def _apply_translator_gate_patches(specific_patches: dict, elevator_shuffle_mode: TeleporterShuffleMode) -> None:
"""
:param specific_patches:
:param elevator_shuffle_mode:
:return:
"""
specific_patches["always_up_gfmc_compound"] = True
specific_patches["always_up_torvus_temple"] = True
specific_patches["always_up_great_temple"] = elevator_shuffle_mode != TeleporterShuffleMode.VANILLA
def _create_elevator_scan_port_patches(
game: RandovaniaGame,
world_list: WorldList,
elevator_connection: ElevatorConnection,
) -> Iterator[dict]:
nodes_by_teleporter_id = _get_nodes_by_teleporter_id(world_list)
for teleporter, node in nodes_by_teleporter_id.items():
if node.extra.get("scan_asset_id") is None:
continue
target_area_name = elevators.get_elevator_or_area_name(game, world_list, elevator_connection[teleporter], True)
yield {
"asset_id": node.extra["scan_asset_id"],
"strings": [f"Access to &push;&main-color=#FF3333;{target_area_name}&pop; granted.", ""],
}
def _logbook_title_string_patches():
return [
{
"asset_id": 3271034066,
"strings": [
'Hints', 'Violet', 'Cobalt', 'Technology', 'Keys 1, 2, 3', 'Keys 7, 8, 9', 'Regular Hints',
'Emerald', 'Amber', '&line-spacing=75;Flying Ing\nCache Hints', 'Keys 4, 5, 6', 'Keys 1, 2, 3',
'&line-spacing=75;Torvus Energy\nController', 'Underground Tunnel', 'Training Chamber',
'Catacombs', 'Gathering Hall', '&line-spacing=75;Fortress\nTransport\nAccess',
'&line-spacing=75;Hall of Combat\nMastery', 'Main Gyro Chamber',
'&line-spacing=75;Sanctuary\nEnergy\nController', 'Main Research', 'Watch Station',
'Sanctuary Entrance', '&line-spacing=75;Transport to\nAgon Wastes', 'Mining Plaza',
'&line-spacing=75;Agon Energy\nController', 'Portal Terminal', 'Mining Station B',
'Mining Station A', 'Meeting Grounds', 'Path of Eyes', 'Path of Roots',
'&line-spacing=75;Main Energy\nController', "Champions of Aether",
'&line-spacing=75;Central\nMining\nStation', 'Main Reactor', 'Torvus Lagoon', 'Catacombs',
'Sanctuary Entrance', "Dynamo Works", 'Storage Cavern A', 'Landing Site', 'Industrial Site',
'&line-spacing=75;Sky Temple\nKey Hints', 'Keys 7, 8, 9', 'Keys 4, 5, 6', 'Sky Temple Key 1',
'Sky Temple Key 2', 'Sky Temple Key 3', 'Sky Temple Key 4', 'Sky Temple Key 5',
'Sky Temple Key 6', 'Sky Temple Key 7', 'Sky Temple Key 8', 'Sky Temple Key 9'
],
}, {
"asset_id": 2301408881,
"strings": [
'Research', 'Mechanisms', 'Luminoth Technology', 'Biology', 'GF Security', 'Vehicles',
'Aether Studies', 'Aether', 'Dark Aether', 'Phazon', 'Sandgrass', 'Blueroot Tree',
'Ing Webtrap',
'Webling', 'U-Mos', 'Bladepod', 'Ing Storage', 'Flying Ing Cache', 'Torvus Bearerpod',
'Agon Bearerpod', 'Ingworm Cache', 'Ingsphere Cache', 'Plantforms', 'Darklings',
'GF Gate Mk VI',
'GF Gate Mk VII', 'GF Lock Mk V', 'GF Defense Shield', 'Kinetic Orb Cannon', 'GF Bridge',
"Samus's Gunship", 'GFS Tyr', 'Pirate Skiff', 'Visors', 'Weapon Systems', 'Armor',
'Morph Ball Systems', 'Movement Systems', 'Beam Weapons', 'Scan Visor', 'Combat Visor',
'Dark Visor',
'Echo Visor', 'Morph Ball', 'Boost Ball', 'Spider Ball', 'Morph Ball Bomb', 'Power Bomb',
'Dark Bomb', 'Light Bomb', 'Annihilator Bomb', 'Space Jump Boots', 'Screw Attack',
'Gravity Boost',
'Grapple Beam', 'Varia Suit', 'Dark Suit', 'Light Suit', 'Power Beam', 'Dark Beam',
'Light Beam',
'Annihilator Beam', 'Missile Launcher', 'Seeker Missile Launcher', 'Super Missile',
'Sonic Boom',
'Darkburst', 'Sunburst', 'Charge Beam', 'Missile Systems', 'Charge Combos', 'Morph Balls',
'Bomb Systems', 'Miscellaneous', 'Dark Temple Keys', 'Bloatsac', 'Luminoth Technology',
'Light Beacons', 'Light Crystals', 'Lift Crystals', 'Utility Crystals', 'Light Crystal',
'Energized Crystal', 'Nullified Crystal', 'Super Crystal', 'Light Beacon', 'Energized Beacon',
'Nullified Beacon', 'Super Beacon', 'Inactive Beacon', 'Dark Lift Crystal',
'Light Lift Crystal',
'Liftvine Crystal', 'Torvus Hanging Pod', 'Sentinel Crystal', 'Dark Sentinel Crystal',
'Systems',
'Bomb Slot', 'Spinner', 'Grapple Point', 'Spider Ball Track', 'Energy Tank',
'Beam Ammo Expansion',
'Missile Expansion', 'Dark Agon Keys', 'Dark Torvus Keys', 'Ing Hive Keys', 'Sky Temple Keys',
'Temple Grounds', 'Sanctuary Fortress', 'Torvus Bog', 'Agon Wastes', 'Dark Agon Temple Key 1',
'Dark Agon Temple Key 2', 'Dark Agon Temple Key 3', 'Dark Torvus Temple Key 1',
'Dark Torvus Temple Key 2', 'Dark Torvus Temple Key 3', 'Ing Hive Temple Key 1',
'Ing Hive Temple Key 2', 'Ing Hive Temple Key 3', 'Sky Temple Key 1', 'Sky Temple Key 2',
'Sky Temple Key 3', 'Sky Temple Key 4', 'Sky Temple Key 5', 'Sky Temple Key 6',
'Sky Temple Key 7',
'Sky Temple Key 8', 'Sky Temple Key 9', 'Suit Expansions', 'Charge Combo', 'Ingclaw',
'Dormant Ingclaw', 'Power Bomb Expansion', 'Energy Transfer Module', 'Cocoons',
'Splinter Cocoon',
'War Wasp Hive', 'Metroid Cocoon', 'Dark Aether', 'Aether', 'Dark Portal', 'Light Portal',
'Energy Controller', 'Wall Jump Surface',
]
},
]
def _akul_testament_string_patch():
# update after each tournament! ordered from newest to oldest
champs = [
{
"title": "2021 Champion",
"name": "Dyceron"
},
{
"title": "2020 Champion",
"name": "Dyceron"
}
]
title = "Metroid Prime 2: Echoes Randomizer Tournament"
champstring = '\n'.join(
[f'{champ["title"]}: {hint_lib.color_text(hint_lib.TextColor.PLAYER, champ["name"])}' for champ in champs])
latest = champstring.partition("\n")[0]
return [
{
"asset_id": 0x080BBD00,
"strings": [
'Luminoth Datapac translated.\n(Champions of Aether)',
f"{title}\n\n{latest}",
f"{title}\n\n{champstring}",
],
},
]
def _create_string_patches(hint_config: HintConfiguration,
game: GameDescription,
all_patches: Dict[int, GamePatches],
area_namers: Dict[int, hint_lib.AreaNamer],
players_config: PlayersConfiguration,
rng: Random,
) -> list:
"""
:param hint_config:
:param game:
:param all_patches:
:return:
"""
patches = all_patches[players_config.player_index]
string_patches = []
string_patches.extend(_akul_testament_string_patch())
# Location Hints
string_patches.extend(
hints.create_hints(all_patches, players_config, game.world_list, area_namers, rng)
)
# Sky Temple Keys
stk_mode = hint_config.sky_temple_keys
if stk_mode == SkyTempleKeyHintMode.DISABLED:
string_patches.extend(sky_temple_key_hint.hide_hints())
else:
string_patches.extend(sky_temple_key_hint.create_hints(
all_patches, players_config, game.resource_database,
area_namers,
stk_mode == SkyTempleKeyHintMode.HIDE_AREA))
# Elevator Scans
string_patches.extend(_create_elevator_scan_port_patches(game.game, game.world_list, patches.elevator_connection))
string_patches.extend(_logbook_title_string_patches())
return string_patches
def _create_starting_popup(layout_configuration: EchoesConfiguration,
resource_database: ResourceDatabase,
starting_items: CurrentResources) -> list:
extra_items = item_names.additional_starting_items(layout_configuration, resource_database, starting_items)
if extra_items:
return [
"Extra starting items:",
", ".join(extra_items)
]
else:
return []
def _simplified_memo_data() -> Dict[str, str]:
result = pickup_exporter.GenericAcquiredMemo()
result["Locked Power Bomb Expansion"] = ("Power Bomb Expansion acquired, "
"but the main Power Bomb is required to use it.")
result["Locked Missile Expansion"] = "Missile Expansion acquired, but the Missile Launcher is required to use it."
result["Locked Seeker Launcher"] = "Seeker Launcher acquired, but the Missile Launcher is required to use it."
return result
def _get_model_name_missing_backup():
"""
A mapping of alternative model names if some models are missing.
:return:
"""
other_game = {
PickupModel(RandovaniaGame.METROID_PRIME, "Charge Beam"): "ChargeBeam INCOMPLETE",
PickupModel(RandovaniaGame.METROID_PRIME, "Super Missile"): "SuperMissile",
PickupModel(RandovaniaGame.METROID_PRIME, "Scan Visor"): "ScanVisor INCOMPLETE",
PickupModel(RandovaniaGame.METROID_PRIME, "Varia Suit"): "VariaSuit INCOMPLETE",
PickupModel(RandovaniaGame.METROID_PRIME, "Gravity Suit"): "VariaSuit INCOMPLETE",
PickupModel(RandovaniaGame.METROID_PRIME, "Phazon Suit"): "VariaSuit INCOMPLETE",
# PickupModel(RandovaniaGame.PRIME1, "Morph Ball"): "MorphBall INCOMPLETE",
PickupModel(RandovaniaGame.METROID_PRIME, "Morph Ball Bomb"): "MorphBallBomb",
PickupModel(RandovaniaGame.METROID_PRIME, "Boost Ball"): "BoostBall",
PickupModel(RandovaniaGame.METROID_PRIME, "Spider Ball"): "SpiderBall",
PickupModel(RandovaniaGame.METROID_PRIME, "Power Bomb"): "PowerBomb",
PickupModel(RandovaniaGame.METROID_PRIME, "Power Bomb Expansion"): "PowerBombExpansion",
PickupModel(RandovaniaGame.METROID_PRIME, "Missile"): "MissileExpansionPrime1",
PickupModel(RandovaniaGame.METROID_PRIME, "Grapple Beam"): "GrappleBeam",
PickupModel(RandovaniaGame.METROID_PRIME, "Space Jump Boots"): "SpaceJumpBoots",
PickupModel(RandovaniaGame.METROID_PRIME, "Energy Tank"): "EnergyTank",
}
return {
f"{model.game.value}_{model.name}": name
for model, name in other_game.items()
}
def _get_model_mapping(randomizer_data: dict):
jingles = {
"SkyTempleKey": 2,
"DarkTempleKey": 2,
"MissileExpansion": 0,
"PowerBombExpansion": 0,
"DarkBeamAmmoExpansion": 0,
"LightBeamAmmoExpansion": 0,
"BeamAmmoExpansion": 0,
}
return EchoesModelNameMapping(
index={
entry["Name"]: entry["Index"]
for entry in randomizer_data["ModelData"]
},
sound_index={
"SkyTempleKey": 1,
"DarkTempleKey": 1,
},
jingle_index={
entry["Name"]: jingles.get(entry["Name"], 1)
for entry in randomizer_data["ModelData"]
},
)
def create_patcher_file(description: LayoutDescription,
players_config: PlayersConfiguration,
cosmetic_patches: EchoesCosmeticPatches,
) -> dict:
"""
:param description:
:param players_config:
:param cosmetic_patches:
:return:
"""
preset = description.permalink.get_preset(players_config.player_index)
configuration = typing.cast(EchoesConfiguration, preset.configuration)
patches = description.all_patches[players_config.player_index]
rng = Random(description.permalink.seed_number)
area_namers = {index: hint_lib.AreaNamer(default_database.game_description_for(preset.game).world_list)
for index, preset in description.permalink.presets.items()}
game = default_database.game_description_for(RandovaniaGame.METROID_PRIME_ECHOES)
result = {}
_add_header_data_to_result(description, result)
result["publisher_id"] = "0R"
if configuration.menu_mod:
result["publisher_id"] = "1R"
result["convert_other_game_assets"] = cosmetic_patches.convert_other_game_assets
result["credits"] = "\n\n\n\n\n" + credits_spoiler.prime_trilogy_credits(
configuration.major_items_configuration,
description.all_patches,
players_config,
area_namers,
"&push;&main-color=#89D6FF;Major Item Locations&pop;",
"&push;&main-color=#33ffd6;{}&pop;",
)
[item_category_visors] = [cat for cat in configuration.major_items_configuration.default_items.keys()
if cat.name == "visor"]
[item_category_beams] = [cat for cat in configuration.major_items_configuration.default_items.keys()
if cat.name == "beam"]
result["menu_mod"] = configuration.menu_mod
result["dol_patches"] = EchoesDolPatchesData(
energy_per_tank=configuration.energy_per_tank,
beam_configuration=configuration.beam_configuration,
safe_zone_heal_per_second=configuration.safe_zone.heal_per_second,
user_preferences=cosmetic_patches.user_preferences,
default_items={
"visor": configuration.major_items_configuration.default_items[item_category_visors].name,
"beam": configuration.major_items_configuration.default_items[item_category_beams].name,
},
unvisited_room_names=(configuration.elevators.can_use_unvisited_room_names
and cosmetic_patches.unvisited_room_names),
teleporter_sounds=cosmetic_patches.teleporter_sounds or configuration.elevators.is_vanilla,
dangerous_energy_tank=configuration.dangerous_energy_tank,
).as_json
# Add Spawn Point
result["spawn_point"] = _create_spawn_point_field(patches, game)
result["starting_popup"] = _create_starting_popup(configuration, game.resource_database, patches.starting_items)
# Add the pickups
result["pickups"] = _create_pickup_list(cosmetic_patches, configuration, game, patches, players_config, rng)
# Add the elevators
result["elevators"] = _create_elevators_field(patches, game)
# Add translators
result["translator_gates"] = _create_translator_gates_field(game, patches.configurable_nodes)
# Scan hints
result["string_patches"] = _create_string_patches(configuration.hints, game, description.all_patches,
area_namers, players_config, rng)
# TODO: if we're starting at ship, needs to collect 9 sky temple keys and want item loss,
# we should disable hive_chamber_b_post_state
result["specific_patches"] = {
"hive_chamber_b_post_state": True,
"intro_in_post_state": True,
"warp_to_start": configuration.warp_to_start,
"credits_length": 75 if cosmetic_patches.speed_up_credits else 259,
"disable_hud_popup": cosmetic_patches.disable_hud_popup,
"pickup_map_icons": cosmetic_patches.pickup_markers,
"full_map_at_start": cosmetic_patches.open_map,
"dark_world_varia_suit_damage": configuration.varia_suit_damage,
"dark_world_dark_suit_damage": configuration.dark_suit_damage,
"hud_color": cosmetic_patches.hud_color if cosmetic_patches.use_hud_color else None,
}
result["logbook_patches"] = [
{"asset_id": 25, "connections": [81, 166, 195], },
{"asset_id": 38, "connections": [4, 33, 120, 251, 364], },
{"asset_id": 60, "connections": [38, 74, 154, 196], },
{"asset_id": 74, "connections": [59, 75, 82, 102, 260], },
{"asset_id": 81, "connections": [148, 151, 156], },
{"asset_id": 119, "connections": [60, 254, 326], },
{"asset_id": 124, "connections": [35, 152, 355], },
{"asset_id": 129, "connections": [29, 118, 367], },
{"asset_id": 154, "connections": [169, 200, 228, 243, 312, 342], },
{"asset_id": 166, "connections": [45, 303, 317], },
{"asset_id": 194, "connections": [1, 6], },
{"asset_id": 195, "connections": [159, 221, 231], },
{"asset_id": 196, "connections": [17, 19, 23, 162, 183, 379], },
{"asset_id": 233, "connections": [58, 191, 373], },
{"asset_id": 241, "connections": [223, 284], },
{"asset_id": 254, "connections": [129, 233, 319], },
{"asset_id": 318, "connections": [119, 216, 277, 343], },
{"asset_id": 319, "connections": [52, 289, 329], },
{"asset_id": 326, "connections": [124, 194, 241, 327], },
{"asset_id": 327, "connections": [46, 275], },
]
if not configuration.elevators.is_vanilla and (cosmetic_patches.unvisited_room_names
and configuration.elevators.can_use_unvisited_room_names):
exclude_map_ids = _ELEVATOR_ROOMS_MAP_ASSET_IDS
else:
exclude_map_ids = []
result["maps_to_always_reveal"] = _ENERGY_CONTROLLER_MAP_ASSET_IDS
result["maps_to_never_reveal"] = exclude_map_ids
_apply_translator_gate_patches(result["specific_patches"], configuration.elevators.mode)
return result
def _create_pickup_list(cosmetic_patches: EchoesCosmeticPatches, configuration: BaseConfiguration,
game: GameDescription,
patches: GamePatches, players_config: PlayersConfiguration,
rng: Random):
useless_target = PickupTarget(pickup_creator.create_echoes_useless_pickup(game.resource_database),
players_config.player_index)
if cosmetic_patches.disable_hud_popup:
memo_data = _simplified_memo_data()
else:
memo_data = default_prime2_memo_data()
pickup_list = pickup_exporter.export_all_indices(
patches,
useless_target,
game.world_list,
rng,
configuration.pickup_model_style,
configuration.pickup_model_data_source,
exporter=pickup_exporter.create_pickup_exporter(game, memo_data, players_config),
visual_etm=pickup_creator.create_visual_etm(),
)
return [
echoes_pickup_details_to_patcher(details, rng)
for details in pickup_list
]
def _add_header_data_to_result(description: LayoutDescription, result: dict) -> None:
result["permalink"] = "-permalink-"
result["seed_hash"] = f"- {description.shareable_word_hash} ({description.shareable_hash})"
result["shareable_hash"] = description.shareable_hash
result["shareable_word_hash"] = description.shareable_word_hash
result["randovania_version"] = randovania.VERSION
@dataclasses.dataclass(frozen=True)
class EchoesModelNameMapping:
index: Dict[str, int]
sound_index: Dict[str, int] # 1 for keys, 0 otherwise
jingle_index: Dict[str, int] # 2 for keys, 1 for major items, 0 otherwise
def _create_pickup_resources_for(resources: ResourceGain):
return [
{
"index": resource.extra["item_id"],
"amount": quantity
}
for resource, quantity in resources
if quantity > 0 and resource.resource_type == ResourceType.ITEM
]
def echoes_pickup_details_to_patcher(details: pickup_exporter.ExportedPickupDetails, rng: Random) -> dict:
if details.model.game == RandovaniaGame.METROID_PRIME_ECHOES:
model_name = details.model.name
else:
model_name = f"{details.model.game.value}_{details.model.name}"
if model_name == "MissileExpansion" and rng.randint(0, _EASTER_EGG_SHINY_MISSILE) == 0:
# If placing a missile expansion model, replace with Dark Missile Trooper model with a 1/8192 chance
model_name = "MissileExpansionPrime1"
hud_text = details.hud_text
if hud_text == ["Energy Transfer Module acquired!"] and (
rng.randint(0, _EASTER_EGG_RUN_VALIDATED_CHANCE) == 0):
hud_text = ["Run validated!"]
return {
"pickup_index": details.index.index,
"resources": _create_pickup_resources_for(details.conditional_resources[0].resources),
"conditional_resources": [
{
"item": conditional.item.extra["item_id"],
"resources": _create_pickup_resources_for(conditional.resources),
}
for conditional in details.conditional_resources[1:]
],
"convert": [
{
"from_item": conversion.source.extra["item_id"],
"to_item": conversion.target.extra["item_id"],
"clear_source": conversion.clear_source,
"overwrite_target": conversion.overwrite_target,
}
for conversion in details.conversion
],
"hud_text": hud_text,
"scan": details.scan_text,
"model_name": model_name,
}
def adjust_model_name(patcher_data: dict, randomizer_data: dict):
mapping = _get_model_mapping(randomizer_data)
backup = _get_model_name_missing_backup()
for pickup in patcher_data["pickups"]:
model_name = pickup.pop("model_name")
if model_name not in mapping.index:
model_name = backup.get(model_name, "EnergyTransferModule")
pickup["model_index"] = mapping.index[model_name]
pickup["sound_index"] = mapping.sound_index.get(model_name, 0)
pickup["jingle_index"] = mapping.jingle_index.get(model_name, 0)
|
henriquegemignani/randovania
|
randovania/games/prime2/patcher/claris_patcher_file.py
|
Python
|
gpl-3.0
| 29,348
|
[
"Amber",
"CRYSTAL"
] |
e3f5cfb4592934ace9bacba80460408ad6f01d41ff5bc160c585653387a636d1
|
from .. import lbx
from .. import savegame
from space_object import SpaceObject
__author__ = "peterman"
__date__ = "$May 15, 2010 9:12:05 AM$"
class Star(SpaceObject):
def __init__(self, star_id):
self.set_id(star_id)
# /__init__
def set_size(self, size):
self.__size = size
def get_size(self):
return self.__size
def set_pict_type(self, pict_type):
self.__pict_type = pict_type
def get_pict_type(self):
return self.__pict_type
def set_class(self, cl):
self.__class = cl
def get_class(self):
return self.__class
def set_objects(self, objects):
self.__objects = objects
def get_objects(self):
return self.__objects
def visited(self):
return self.__visited
def wormhole(self):
return self.__wormhole
def visited_by_player(self, player_id):
return self.__visited & (1 << player_id)
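    # Example: if __visited == 0b00000101, players 0 and 2 have visited
    # this star, so visited_by_player(0) and visited_by_player(2) are
    # non-zero while visited_by_player(1) == 0.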
def get_data(self):
return self.__moo2data
def is_in_nebula(self):
return self.__is_in_nebula
def import_from_moo2(self, data):
self.__moo2data = data
self.set_name(lbx.read_string(data, 0, 15))
self.set_x(lbx.read_short_int(data, 0x0f))
self.set_y(lbx.read_short_int(data, 0x11))
self.set_size(lbx.read_byte(data, 0x13))
self.__owner = lbx.read_byte(data, 0x14) # primary owner
self.set_pict_type(lbx.read_byte(data, 0x15))
self.set_class(lbx.read_byte(data, 0x16))
self.__last_planet_selected = [
lbx.read_byte(data, 0x17),
lbx.read_byte(data, 0x18),
lbx.read_byte(data, 0x19),
lbx.read_byte(data, 0x1A),
lbx.read_byte(data, 0x1B),
lbx.read_byte(data, 0x1C),
lbx.read_byte(data, 0x1D),
lbx.read_byte(data, 0x1E)
]
self.__special = lbx.read_byte(data, 0x28)
self.__wormhole = lbx.read_byte(data, 0x29)
self.__blockaded_players = bitmask_to_player_id_list(ord(data[0x2a]))
self.__blockaded_by_bitmask = [ord(data[0x2b]), ord(data[0x2c]), ord(data[0x2d]), ord(data[0x2e]), ord(data[0x2f]), ord(data[0x30]), ord(data[0x31]), ord(data[0x32])]
        self.__visited = ord(data[0x33]) # bitmask as booleans for each player
self.__just_visited_bitmask = ord(data[0x34]) # players bitmask to track first visit of this star -> user should get report
self.__ignore_colony_ship_bitmask = ord(data[0x35]) # players bitmask to track if player chose to not use a colony ship, cleared on every new colony ship here?
self.__ignore_combat_bitmask = ord(data[0x36]) # players bitmask to track if player chose to ignore combat ships = perform blockade only do not fight here?
self.__colonize_player = ord(data[0x37]) # 0..7 or -1
self.__colonies_bitmask = ord(data[0x38]) # has colony / players bitmask / redundant info?
self.__interdictors_bitmask = ord(data[0x39]) # has warp interdictor / players bitmask
        self.__next_wfi_in_list = ord(data[0x3a]) # bookkeeping ???
self.__tachyon_com_bitmask = ord(data[0x3b]) # has tachyon communicator / players bitmask
self.__subspace_com_bitmask = ord(data[0x3c]) # has subspace communicator / players bitmask
self.__stargates_bitmask = ord(data[0x3d]) # has stargate / players bitmask
self.__jumpgates_bitmask = ord(data[0x3e]) # has jumpgate / players bitmask
        self.__artemis_bitmask = ord(data[0x3f]) # has artemis net / players bitmask
self.__portals_bitmask = ord(data[0x40]) # has dimension portal / players bitmask
self.__stagepoint_bitmask = ord(data[0x41]) # bitvector tells whether star is stagepoint for each AI
self.__players_officers = [ord(data[0x42]), ord(data[0x43]), ord(data[0x44]), ord(data[0x45]), ord(data[0x46]), ord(data[0x47]), ord(data[0x48]), ord(data[0x49])]
self.set_objects([
lbx.read_short_int(data, 0x4a),
lbx.read_short_int(data, 0x4c),
lbx.read_short_int(data, 0x4e),
lbx.read_short_int(data, 0x50),
lbx.read_short_int(data, 0x52)
])
self.__surrender_to = [ord(data[0x67]), ord(data[0x68]), ord(data[0x69]), ord(data[0x6a]), ord(data[0x6b]), ord(data[0x6c]), ord(data[0x6d]), ord(data[0x6e])]
self.__is_in_nebula = (ord(data[0x6f]) == 1)
"""
'black_hole_blocks': [
],
'0x1f': ord(self.__data[0x1f]),
'0x20': ord(self.__data[0x20]),
'0x21': ord(self.__data[0x21]),
'0x22': ord(self.__data[0x22]),
'0x23': ord(self.__data[0x23]),
'0x24': ord(self.__data[0x24]),
'0x25': ord(self.__data[0x25]),
'0x26': ord(self.__data[0x26]),
'0x27': ord(self.__data[0x27]),
'0x2a': ord(self.__data[0x2a]), # blockaded? ( 0 | 1 )
'0x2b': ord(self.__data[0x2b]),
'0x2c': ord(self.__data[0x2c]),
'0x2d': ord(self.__data[0x2d]),
'0x2e': ord(self.__data[0x2e]),
'0x2f': ord(self.__data[0x2f]),
'0x30': ord(self.__data[0x30]),
'0x31': ord(self.__data[0x31]),
'0x32': ord(self.__data[0x32]),
'0x34': ord(self.__data[0x34]),
'0x35': ord(self.__data[0x35]),
'0x36': ord(self.__data[0x36]),
'0x37': ord(self.__data[0x37]),
'0x38': ord(self.__data[0x38]),
'0x3a': ord(self.__data[0x3a]),
'0x3b': ord(self.__data[0x3b]),
'0x3c': ord(self.__data[0x3c]),
'0x3d': ord(self.__data[0x3d]),
'0x3e': ord(self.__data[0x3e]), # jumpgate players bitmask
        '0x3f': ord(self.__data[0x3f]), # artemis_net players bitmask
'0x40': ord(self.__data[0x40]), # dimensional portal players bitmask
'0x41': ord(self.__data[0x41]), # is_stagepoint bitvector tells whether star is stagepoint for each AI
        '0x42': ord(self.__data[0x42]), # officer_id for player #0
        '0x43': ord(self.__data[0x43]), # officer_id for player #1
        '0x44': ord(self.__data[0x44]), # officer_id for player #2
        '0x45': ord(self.__data[0x45]), # officer_id for player #3
        '0x46': ord(self.__data[0x46]), # officer_id for player #4
        '0x47': ord(self.__data[0x47]), # officer_id for player #5
        '0x48': ord(self.__data[0x48]), # officer_id for player #6
        '0x49': ord(self.__data[0x49]), # officer_id for player #7
# ??? Relocation star id (0-7 player #, not sure about size of array. )
'0x54': ord(self.__data[0x54]),
'0x55': ord(self.__data[0x55]),
'0x56': ord(self.__data[0x56]),
'0x57': ord(self.__data[0x57]),
'0x58': ord(self.__data[0x58]),
'0x59': ord(self.__data[0x59]),
'0x5a': ord(self.__data[0x5a]),
'0x5b': ord(self.__data[0x5b]),
'0x5c': ord(self.__data[0x5c]),
'0x5d': ord(self.__data[0x5d]),
'0x5e': ord(self.__data[0x5e]),
'0x5f': ord(self.__data[0x5f]),
'0x60': ord(self.__data[0x60]),
'0x61': ord(self.__data[0x61]),
'0x62': ord(self.__data[0x62]),
'0x63': ord(self.__data[0x63]),
# unknown:
'0x64': ord(self.__data[0x64]), # not used? always = 255 ?
'0x65': ord(self.__data[0x65]), # not used? always = 255 ?
'0x66': ord(self.__data[0x66]), # not used? always = 0 ?
'0x67': ord(self.__data[0x67]), # surrender to #0 normally -1, else player to give colonies to
'0x68': ord(self.__data[0x68]), # surrender to #1 normally -1, else player to give colonies to
'0x69': ord(self.__data[0x69]), # surrender to #2 normally -1, else player to give colonies to
'0x6a': ord(self.__data[0x6a]), # surrender to #3 normally -1, else player to give colonies to
'0x6b': ord(self.__data[0x6b]), # surrender to #4 normally -1, else player to give colonies to
'0x6c': ord(self.__data[0x6c]), # surrender to #5 normally -1, else player to give colonies to
'0x6d': ord(self.__data[0x6d]), # surrender to #6 normally -1, else player to give colonies to
'0x6e': ord(self.__data[0x6e]), # surrender to #7 normally -1, else player to give colonies to
'0x6f': ord(self.__data[0x6f]), # in nebula
'0x70': ord(self.__data[0x70]) # artifacts_gave_app
# /import_from_moo2
"""
class UnexploredStar(Star):
"""
stars[star_id] = {
'id': star.get_id(),
'name': "Unexplored",
'x': star.get_x(),
'y': star.get_y(),
        'size': star.get_size(),
        'pict_type': star.get_pict_type(),
        'class': star.get_class(),
        'visited': star.visited()
}
"""
def __init__(self, star_id, x, y, size, pict_type, cl):
self.set_id(star_id)
self.set_name("Unexplored")
self.set_x(x)
self.set_y(y)
self.set_size(size)
self.set_class(cl)
self.set_pict_type(pict_type)
def visited(self):
return False
def visited_by_player(self, player_id):
return False
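# Illustrative usage sketch (hypothetical values; set_id/set_name/set_x/...
# are assumed to be provided by SpaceObject):
#
#   star = UnexploredStar(star_id=7, x=120, y=85, size=2, pict_type=1, cl=3)
#   star.get_size()             # -> 2
#   star.visited()              # -> False
#   star.visited_by_player(0)   # -> False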
|
mimi1vx/openmoo2
|
oldmess/universe/star.py
|
Python
|
gpl-2.0
| 10,080
|
[
"VisIt"
] |
60a71c67c380ac4d9c1c495dd5acb7a0450795f1705a4fe078e9bd3844fa318b
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import numpy as np
from pyscf import scf
def convert_to_gcisd(myci):
from pyscf.ci import gcisd
if isinstance(myci, gcisd.GCISD):
return myci
mf = scf.addons.convert_to_ghf(myci._scf)
gci = gcisd.GCISD(mf)
    assert myci._nocc is None
    assert myci._nmo is None
gci.__dict__.update(myci.__dict__)
gci._scf = mf
gci.mo_coeff = mf.mo_coeff
gci.mo_occ = mf.mo_occ
if isinstance(myci.frozen, (int, np.integer)):
gci.frozen = myci.frozen * 2
else:
raise NotImplementedError
gci.ci = gcisd.from_rcisdvec(myci.ci, myci.nocc, mf.mo_coeff.orbspin)
return gci
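if __name__ == '__main__':
    # Minimal usage sketch (illustrative; the molecule and basis are
    # arbitrary). frozen=0 keeps .frozen an integer, which this converter
    # expects.
    from pyscf import gto, ci
    mol = gto.M(atom='H 0 0 0; H 0 0 0.74', basis='sto-3g', verbose=0)
    mf = scf.RHF(mol).run()
    myci = ci.CISD(mf, frozen=0).run()
    gci = convert_to_gcisd(myci)
    print(gci.e_tot)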
|
sunqm/pyscf
|
pyscf/ci/addons.py
|
Python
|
apache-2.0
| 1,311
|
[
"PySCF"
] |
7f9350ffa4687eb27d46ef99db63ff11b313416f62e6a4f957702d1777755727
|
from __future__ import division, unicode_literals
import inspect
import time
import serial
from .util import to_two_bytes, two_byte_iter_to_str, pin_list_to_board_dict
# Message command bytes (0x80(128) to 0xFF(255)) - straight from Firmata.h
DIGITAL_MESSAGE = 0x90 # send data for a digital pin
ANALOG_MESSAGE = 0xE0 # send data for an analog pin (or PWM)
DIGITAL_PULSE = 0x91 # SysEx command to send a digital pulse
# PULSE_MESSAGE = 0xA0 # proposed pulseIn/Out msg (SysEx)
# SHIFTOUT_MESSAGE = 0xB0 # proposed shiftOut msg (SysEx)
REPORT_ANALOG = 0xC0 # enable analog input by pin #
REPORT_DIGITAL = 0xD0 # enable digital input by port pair
START_SYSEX = 0xF0 # start a MIDI SysEx msg
SET_PIN_MODE = 0xF4 # set a pin to INPUT/OUTPUT/PWM/etc
END_SYSEX = 0xF7 # end a MIDI SysEx msg
REPORT_VERSION = 0xF9 # report firmware version
SYSTEM_RESET = 0xFF # reset from MIDI
QUERY_FIRMWARE = 0x79 # query the firmware name
# extended command set using sysex (0-127/0x00-0x7F)
# 0x00-0x0F reserved for user-defined commands */
EXTENDED_ANALOG = 0x6F # analog write (PWM, Servo, etc) to any pin
PIN_STATE_QUERY = 0x6D # ask for a pin's current mode and value
PIN_STATE_RESPONSE = 0x6E # reply with pin's current mode and value
CAPABILITY_QUERY = 0x6B # ask for supported modes and resolution of all pins
CAPABILITY_RESPONSE = 0x6C # reply with supported modes and resolution
ANALOG_MAPPING_QUERY = 0x69 # ask for mapping of analog to pin numbers
ANALOG_MAPPING_RESPONSE = 0x6A # reply with mapping info
SERVO_CONFIG = 0x70 # set max angle, minPulse, maxPulse, freq
STRING_DATA = 0x71 # a string message with 14-bits per char
SHIFT_DATA = 0x75 # a bitstream to/from a shift register
I2C_REQUEST = 0x76 # send an I2C read/write request
I2C_REPLY = 0x77 # a reply to an I2C read request
I2C_CONFIG = 0x78 # config I2C settings such as delay times and power pins
REPORT_FIRMWARE = 0x79 # report name and version of the firmware
SAMPLING_INTERVAL = 0x7A # set the poll rate of the main loop
SYSEX_NON_REALTIME = 0x7E # MIDI Reserved for non-realtime messages
SYSEX_REALTIME = 0x7F # MIDI Reserved for realtime messages
# Pin modes.
# except from UNAVAILABLE taken from Firmata.h
UNAVAILABLE = -1
INPUT = 0 # as defined in wiring.h
OUTPUT = 1 # as defined in wiring.h
ANALOG = 2 # analog pin in analogInput mode
PWM = 3 # digital pin in PWM output mode
SERVO = 4 # digital pin in SERVO mode
# Pin types
DIGITAL = OUTPUT # same as OUTPUT above
# ANALOG is already defined above
# Time to wait after initializing serial, used in Board.__init__
BOARD_SETUP_WAIT_TIME = 5
class PinAlreadyTakenError(Exception):
pass
class InvalidPinDefError(Exception):
pass
class NoInputWarning(RuntimeWarning):
pass
class Board(object):
"""The Base class for any board."""
firmata_version = None
firmware = None
firmware_version = None
_command_handlers = {}
_command = None
_stored_data = []
_parsing_sysex = False
def __init__(self, port, layout=None, baudrate=57600, name=None):
self.sp = serial.Serial(port, baudrate)
# Allow 5 secs for Arduino's auto-reset to happen
# Alas, Firmata blinks its version before printing it to serial
# For 2.3, even 5 seconds might not be enough.
# TODO Find a more reliable way to wait until the board is ready
self.pass_time(BOARD_SETUP_WAIT_TIME)
self.name = name
self._layout = layout
if not self.name:
self.name = port
if layout:
self.setup_layout(layout)
else:
self.auto_setup()
# Iterate over the first messages to get firmware data
while self.bytes_available():
self.iterate()
# TODO Test whether we got a firmware name and version, otherwise there
# probably isn't any Firmata installed
def __str__(self):
return "Board{0.name} on {0.sp.port}".format(self)
def __del__(self):
"""
        The connection with a board can get messed up when a script is
closed without calling board.exit() (which closes the serial
connection). Therefore also do it here and hope it helps.
"""
self.exit()
def send_as_two_bytes(self, val):
self.sp.write(bytearray([val % 128, val >> 7]))
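    # Illustrative round trip of the 7-bit two-byte encoding used above
    # (LSB first): for val == 1000, val % 128 == 104 and val >> 7 == 7,
    # and (7 << 7) + 104 == 1000 recovers the value, exactly as
    # _handle_analog_message reassembles (msb << 7) + lsb.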
def setup_layout(self, board_layout):
"""
Setup the Pin instances based on the given board layout.
"""
# Create pin instances based on board layout
self.analog = []
for i in board_layout['analog']:
self.analog.append(Pin(self, i))
self.digital = []
self.digital_ports = []
for i in range(0, len(board_layout['digital']), 8):
num_pins = len(board_layout['digital'][i:i + 8])
port_number = int(i / 8)
self.digital_ports.append(Port(self, port_number, num_pins))
# Allow to access the Pin instances directly
for port in self.digital_ports:
self.digital += port.pins
# Setup PWM pins
for i in board_layout['pwm']:
self.digital[i].PWM_CAPABLE = True
# Disable certain ports like Rx/Tx and crystal ports
for i in board_layout['disabled']:
self.digital[i].mode = UNAVAILABLE
# Create a dictionary of 'taken' pins. Used by the get_pin method
self.taken = {'analog': dict(map(lambda p: (p.pin_number, False), self.analog)),
'digital': dict(map(lambda p: (p.pin_number, False), self.digital))}
self._set_default_handlers()
def _set_default_handlers(self):
# Setup default handlers for standard incoming commands
self.add_cmd_handler(ANALOG_MESSAGE, self._handle_analog_message)
self.add_cmd_handler(DIGITAL_MESSAGE, self._handle_digital_message)
self.add_cmd_handler(REPORT_VERSION, self._handle_report_version)
self.add_cmd_handler(REPORT_FIRMWARE, self._handle_report_firmware)
def auto_setup(self):
"""
Automatic setup based on Firmata's "Capability Query"
"""
self.add_cmd_handler(CAPABILITY_RESPONSE, self._handle_report_capability_response)
self.send_sysex(CAPABILITY_QUERY, [])
self.pass_time(0.1) # Serial SYNC
while self.bytes_available():
self.iterate()
# handle_report_capability_response will write self._layout
if self._layout:
self.setup_layout(self._layout)
else:
raise IOError("Board detection failed.")
def add_cmd_handler(self, cmd, func):
"""Adds a command handler for a command."""
len_args = len(inspect.getargspec(func)[0])
def add_meta(f):
def decorator(*args, **kwargs):
f(*args, **kwargs)
decorator.bytes_needed = len_args - 1 # exclude self
decorator.__name__ = f.__name__
return decorator
func = add_meta(func)
self._command_handlers[cmd] = func
def get_pin(self, pin_def):
"""
Returns the activated pin given by the pin definition.
May raise an ``InvalidPinDefError`` or a ``PinAlreadyTakenError``.
:arg pin_def: Pin definition as described below,
but without the arduino name. So for example ``a:1:i``.
'a' analog pin Pin number 'i' for input
'd' digital pin Pin number 'o' for output
'p' for pwm (Pulse-width modulation)
        All separated by ``:``.
"""
        if isinstance(pin_def, list):
            bits = pin_def
        else:
            bits = pin_def.split(':')
        a_d = 'analog' if bits[0] == 'a' else 'digital'
part = getattr(self, a_d)
pin_nr = int(bits[1])
if pin_nr >= len(part):
raise InvalidPinDefError('Invalid pin definition: {0} at position 3 on {1}'.format(pin_def, self.name))
if getattr(part[pin_nr], 'mode', None) == UNAVAILABLE:
            raise InvalidPinDefError('Invalid pin definition: UNAVAILABLE pin {0} on {1}'.format(pin_def, self.name))
if self.taken[a_d][pin_nr]:
raise PinAlreadyTakenError('{0} pin {1} is already taken on {2}'.format(a_d, bits[1], self.name))
# ok, should be available
pin = part[pin_nr]
self.taken[a_d][pin_nr] = True
if pin.type is DIGITAL:
if bits[2] == 'p':
pin.mode = PWM
elif bits[2] == 's':
pin.mode = SERVO
elif bits[2] != 'o':
pin.mode = INPUT
else:
pin.enable_reporting()
return pin
def pass_time(self, t):
"""Non-blocking time-out for ``t`` seconds."""
cont = time.time() + t
while time.time() < cont:
time.sleep(0)
def send_sysex(self, sysex_cmd, data):
"""
Sends a SysEx msg.
:arg sysex_cmd: A sysex command byte
        :arg data: a bytearray of 7-bit bytes of arbitrary data
"""
msg = bytearray([START_SYSEX, sysex_cmd])
msg.extend(data)
msg.append(END_SYSEX)
self.sp.write(msg)
def bytes_available(self):
return self.sp.inWaiting()
def iterate(self):
"""
Reads and handles data from the microcontroller over the serial port.
This method should be called in a main loop or in an :class:`Iterator`
        instance to keep this board's pin values up to date.
"""
byte = self.sp.read()
if not byte:
return
data = ord(byte)
received_data = []
handler = None
if data < START_SYSEX:
            # These commands can have 'channel data' like a pin number appended.
try:
handler = self._command_handlers[data & 0xF0]
except KeyError:
return
received_data.append(data & 0x0F)
while len(received_data) < handler.bytes_needed:
received_data.append(ord(self.sp.read()))
elif data == START_SYSEX:
data = ord(self.sp.read())
handler = self._command_handlers.get(data)
if not handler:
return
data = ord(self.sp.read())
while data != END_SYSEX:
received_data.append(data)
data = ord(self.sp.read())
else:
try:
handler = self._command_handlers[data]
except KeyError:
return
while len(received_data) < handler.bytes_needed:
received_data.append(ord(self.sp.read()))
# Handle the data
try:
handler(*received_data)
except ValueError:
pass
def get_firmata_version(self):
"""
Returns a version tuple (major, minor) for the firmata firmware on the
board.
"""
return self.firmata_version
def servo_config(self, pin, min_pulse=544, max_pulse=2400, angle=0):
"""
Configure a pin as servo with min_pulse, max_pulse and first angle.
``min_pulse`` and ``max_pulse`` default to the arduino defaults.
"""
if pin > len(self.digital) or self.digital[pin].mode == UNAVAILABLE:
raise IOError("Pin {0} is not a valid servo pin".format(pin))
data = bytearray([pin])
data += to_two_bytes(min_pulse)
data += to_two_bytes(max_pulse)
self.send_sysex(SERVO_CONFIG, data)
# set pin._mode to SERVO so that it sends analog messages
# don't set pin.mode as that calls this method
self.digital[pin]._mode = SERVO
self.digital[pin].write(angle)
def exit(self):
"""Call this to exit cleanly."""
        # First detach all servos, otherwise it somehow doesn't want to close...
if hasattr(self, 'digital'):
for pin in self.digital:
if pin.mode == SERVO:
pin.mode = OUTPUT
if hasattr(self, 'sp'):
self.sp.close()
# Command handlers
def _handle_analog_message(self, pin_nr, lsb, msb):
value = round(float((msb << 7) + lsb) / 1023, 4)
# Only set the value if we are actually reporting
try:
if self.analog[pin_nr].reporting:
self.analog[pin_nr].value = value
except IndexError:
raise ValueError
def _handle_digital_message(self, port_nr, lsb, msb):
"""
        Digital messages always go by the whole port. This means we have a
        bitmask with which we update the whole port.
"""
mask = (msb << 7) + lsb
try:
self.digital_ports[port_nr]._update(mask)
except IndexError:
raise ValueError
def _handle_report_version(self, major, minor):
self.firmata_version = (major, minor)
def _handle_report_firmware(self, *data):
major = data[0]
minor = data[1]
self.firmware_version = (major, minor)
self.firmware = two_byte_iter_to_str(data[2:])
def _handle_report_capability_response(self, *data):
charbuffer = []
pin_spec_list = []
for c in data:
if c == CAPABILITY_RESPONSE:
continue
charbuffer.append(c)
if c == 0x7F:
# A copy of charbuffer
pin_spec_list.append(charbuffer[:])
charbuffer = []
self._layout = pin_list_to_board_dict(pin_spec_list)
class Port(object):
"""An 8-bit port on the board."""
def __init__(self, board, port_number, num_pins=8):
self.board = board
self.port_number = port_number
self.reporting = False
self.pins = []
for i in range(num_pins):
pin_nr = i + self.port_number * 8
self.pins.append(Pin(self.board, pin_nr, type=DIGITAL, port=self))
def __str__(self):
return "Digital Port {0.port_number} on {0.board}".format(self)
def enable_reporting(self):
"""Enable reporting of values for the whole port."""
self.reporting = True
msg = bytearray([REPORT_DIGITAL + self.port_number, 1])
self.board.sp.write(msg)
for pin in self.pins:
if pin.mode == INPUT:
pin.reporting = True # TODO Shouldn't this happen at the pin?
def disable_reporting(self):
"""Disable the reporting of the port."""
self.reporting = False
msg = bytearray([REPORT_DIGITAL + self.port_number, 0])
self.board.sp.write(msg)
def write(self):
"""Set the output pins of the port to the correct state."""
mask = 0
for pin in self.pins:
if pin.mode == OUTPUT:
if pin.value == 1:
pin_nr = pin.pin_number - self.port_number * 8
mask |= 1 << int(pin_nr)
# print("type mask", type(mask))
# print("type self.portnumber", type(self.port_number))
# print("type pinnr", type(pin_nr))
msg = bytearray([DIGITAL_MESSAGE + self.port_number, mask % 128, mask >> 7])
self.board.sp.write(msg)
def _update(self, mask):
"""Update the values for the pins marked as input with the mask."""
if self.reporting:
for pin in self.pins:
if pin.mode is INPUT:
pin_nr = pin.pin_number - self.port_number * 8
pin.value = (mask & (1 << pin_nr)) > 0
class Pin(object):
"""A Pin representation"""
def __init__(self, board, pin_number, type=ANALOG, port=None):
self.board = board
self.pin_number = pin_number
self.type = type
self.port = port
self.PWM_CAPABLE = False
self._mode = (type == DIGITAL and OUTPUT or INPUT)
self.reporting = False
self.value = None
def __str__(self):
type = {ANALOG: 'Analog', DIGITAL: 'Digital'}[self.type]
return "{0} pin {1}".format(type, self.pin_number)
def _set_mode(self, mode):
if mode is UNAVAILABLE:
self._mode = UNAVAILABLE
return
if self._mode is UNAVAILABLE:
raise IOError("{0} can not be used through Firmata".format(self))
if mode is PWM and not self.PWM_CAPABLE:
raise IOError("{0} does not have PWM capabilities".format(self))
if mode == SERVO:
if self.type != DIGITAL:
raise IOError("Only digital pins can drive servos! {0} is not"
"digital".format(self))
self._mode = SERVO
self.board.servo_config(self.pin_number)
return
# Set mode with SET_PIN_MODE message
self._mode = mode
self.board.sp.write(bytearray([SET_PIN_MODE, self.pin_number, mode]))
if mode == INPUT:
self.enable_reporting()
def _get_mode(self):
return self._mode
mode = property(_get_mode, _set_mode)
"""
Mode of operation for the pin. Can be one of the pin modes: INPUT, OUTPUT,
    ANALOG, PWM, or SERVO (or UNAVAILABLE).
"""
def enable_reporting(self):
"""Set an input pin to report values."""
if self.mode is not INPUT:
raise IOError("{0} is not an input and can therefore not report".format(self))
if self.type == ANALOG:
self.reporting = True
msg = bytearray([REPORT_ANALOG + self.pin_number, 1])
self.board.sp.write(msg)
else:
self.port.enable_reporting()
# TODO This is not going to work for non-optimized boards like Mega
def disable_reporting(self):
"""Disable the reporting of an input pin."""
if self.type == ANALOG:
self.reporting = False
msg = bytearray([REPORT_ANALOG + self.pin_number, 0])
self.board.sp.write(msg)
else:
self.port.disable_reporting()
# TODO This is not going to work for non-optimized boards like Mega
def read(self):
"""
Returns the output value of the pin. This value is updated by the
boards :meth:`Board.iterate` method. Value is always in the range from
0.0 to 1.0.
"""
if self.mode == UNAVAILABLE:
raise IOError("Cannot read pin {0}".format(self.__str__()))
return self.value
def write(self, value):
"""
        Output a voltage from the pin.
:arg value: Uses value as a boolean if the pin is in output mode, or
expects a float from 0 to 1 if the pin is in PWM mode. If the pin
is in SERVO the value should be in degrees.
"""
if self.mode is UNAVAILABLE:
raise IOError("{0} can not be used through Firmata".format(self))
if self.mode is INPUT:
raise IOError("{0} is set up as an INPUT and can therefore not be written to".format(self))
if value is not self.value:
self.value = value
if self.mode is OUTPUT:
if self.port:
self.port.write()
else:
msg = bytearray([DIGITAL_MESSAGE, self.pin_number, value])
self.board.sp.write(msg)
elif self.mode is PWM:
value = int(round(value * 255))
msg = bytearray([ANALOG_MESSAGE + self.pin_number, value % 128, value >> 7])
self.board.sp.write(msg)
elif self.mode is SERVO:
value = int(value)
msg = bytearray([ANALOG_MESSAGE + self.pin_number, value % 128, value >> 7])
self.board.sp.write(msg)
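if __name__ == '__main__':
    # Minimal usage sketch (assumes a board running StandardFirmata; the
    # serial port name and the pin choice are assumptions for the example).
    board = Board('/dev/ttyACM0')
    led = board.get_pin('d:13:o')  # digital pin 13 as output
    led.write(1)
    board.exit()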
|
andyclymer/ControlBoard
|
lib/modules/pyFirmata-master/pyfirmata/pyfirmata.py
|
Python
|
mit
| 19,932
|
[
"CRYSTAL"
] |
bf1abf0095b4d6a4aa774d50215e310257c2fcaa691d73c9a019993f4ef506d4
|
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
'''
Tests for the initializer classes.
'''
import itertools as itt
import numpy as np
from neon import NervanaObject
from neon.initializers.initializer import Array, Constant, Uniform, Gaussian, GlorotUniform
def pytest_generate_tests(metafunc):
if 'args' in metafunc.fixturenames:
fargs = []
dim1 = [1, 5]
dim2 = [2, 10]
fargs = itt.product(dim1, dim2)
metafunc.parametrize('args', fargs)
def test_constant(backend_default, args):
be = NervanaObject.be
dim1, dim2 = args
shape = (dim1, dim2)
const_arg = 3
Wdev = be.empty(shape)
const_init = Constant(const_arg)
const_init.fill(Wdev)
Whost = Wdev.get()
flat = Whost.flatten()
for elt in flat:
assert elt == const_arg
return
def test_array(backend_default, args):
be = NervanaObject.be
dim1, dim2 = args
shape = (dim1, dim2)
Wloc = be.array(np.arange(shape[0]*shape[1]).reshape(shape))
Wdev = be.empty(shape)
init = Array(Wdev)
init.fill(Wloc)
assert np.all(np.equal(Wdev.get(), Wloc.get()))
return
def test_uniform(backend_default, args):
be = NervanaObject.be
dim1, dim2 = args
shape = (dim1, dim2)
Wdev = be.empty(shape)
uniform_init = Uniform(low=-5, high=15)
uniform_init.fill(Wdev)
Whost = Wdev.get()
flat = Whost.flatten()
for elt in flat:
assert elt <= 15 and elt >= -5
return
def test_gaussian(backend_default, args):
be = NervanaObject.be
dim1, dim2 = args
shape = (dim1, dim2)
Wdev = be.empty(shape)
gaussian_init = Gaussian(loc=10000, scale=1)
gaussian_init.fill(Wdev)
Whost = Wdev.get()
flat = Whost.flatten()
for elt in flat:
# Not a very robust test...
assert elt >= 0
return
def test_glorot(backend_default, args):
be = NervanaObject.be
shape_1 = (1, 2)
shape_2 = (1000, 10000)
Wdev_1 = be.empty(shape_1)
Wdev_2 = be.empty(shape_2)
glorot_init = GlorotUniform()
glorot_init.fill(Wdev_1)
glorot_init.fill(Wdev_2)
Whost_1 = Wdev_1.get()
Whost_2 = Wdev_2.get()
mean_1 = np.mean(Whost_1)
mean_2 = np.mean(Whost_2)
assert np.abs(mean_1) > np.abs(mean_2)
return
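# For reference: GlorotUniform draws from U(-k, k) with
# k = sqrt(6 / (fan_in + fan_out)), so the much larger shape_2 above gets a
# far smaller k and a sample mean closer to zero, which is what the final
# assertion checks. A plain-numpy sketch of the same fill (illustrative,
# not neon's implementation):
def _glorot_uniform_sketch(shape, rng=np.random):
    k = np.sqrt(6.0 / (shape[0] + shape[1]))
    return rng.uniform(low=-k, high=k, size=shape)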
|
coufon/neon-distributed
|
tests/test_initializers.py
|
Python
|
apache-2.0
| 2,974
|
[
"Gaussian"
] |
6703b6b794c737b5708d7c99fd292fc7a91d70d144dcaab1d65b90258d1fe77d
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Löwdin population analysis."""
import random
import numpy
from cclib.method.population import Population
class LPA(Population):
"""The Löwdin population analysis"""
def __init__(self, *args):
super().__init__(logname="LPA", *args)
def __str__(self):
"""Return a string representation of the object."""
return "LPA of %s" % (self.data)
def __repr__(self):
"""Return a representation of the object."""
return 'LPA("%s")' % (self.data)
def calculate(self, indices=None, x=0.5, fupdate=0.05):
"""Perform a calculation of Löwdin population analysis.
Inputs:
indices - list of lists containing atomic orbital indices of fragments
        x - overlap matrix exponent in wavefunction projection (x=0.5 for Lowdin)
"""
unrestricted = (len(self.data.mocoeffs) == 2)
nbasis = self.data.nbasis
# Determine number of steps, and whether process involves beta orbitals.
self.logger.info("Creating attribute aoresults: [array[2]]")
alpha = len(self.data.mocoeffs[0])
self.aoresults = [ numpy.zeros([alpha, nbasis], "d") ]
nstep = alpha
if unrestricted:
beta = len(self.data.mocoeffs[1])
self.aoresults.append(numpy.zeros([beta, nbasis], "d"))
nstep += beta
        # initialize progress if available
if self.progress:
self.progress.initialize(nstep)
if hasattr(self.data, "aooverlaps"):
S = self.data.aooverlaps
elif hasattr(self.data, "fooverlaps"):
S = self.data.fooverlaps
# Get eigenvalues and matrix of eigenvectors for transformation decomposition (U).
        # Find roots of diagonal elements, and transform backwards using eigenvectors.
# We need two matrices here, one for S^x, another for S^(1-x).
# We don't need to invert U, since S is symmetrical.
eigenvalues, U = numpy.linalg.eig(S)
UI = U.transpose()
Sdiagroot1 = numpy.identity(len(S))*numpy.power(eigenvalues, x)
Sdiagroot2 = numpy.identity(len(S))*numpy.power(eigenvalues, 1-x)
Sroot1 = numpy.dot(U, numpy.dot(Sdiagroot1, UI))
Sroot2 = numpy.dot(U, numpy.dot(Sdiagroot2, UI))
step = 0
for spin in range(len(self.data.mocoeffs)):
for i in range(len(self.data.mocoeffs[spin])):
if self.progress and random.random() < fupdate:
self.progress.update(step, "Lowdin Population Analysis")
ci = self.data.mocoeffs[spin][i]
temp1 = numpy.dot(ci, Sroot1)
temp2 = numpy.dot(ci, Sroot2)
self.aoresults[spin][i] = numpy.multiply(temp1, temp2).astype("d")
step += 1
if self.progress:
self.progress.update(nstep, "Done")
retval = super().partition(indices)
if not retval:
self.logger.error("Error in partitioning results")
return False
# Create array for charges.
self.logger.info("Creating fragcharges: array[1]")
size = len(self.fragresults[0][0])
self.fragcharges = numpy.zeros([size], "d")
alpha = numpy.zeros([size], "d")
if unrestricted:
beta = numpy.zeros([size], "d")
for spin in range(len(self.fragresults)):
for i in range(self.data.homos[spin] + 1):
temp = numpy.reshape(self.fragresults[spin][i], (size,))
self.fragcharges = numpy.add(self.fragcharges, temp)
if spin == 0:
alpha = numpy.add(alpha, temp)
elif spin == 1:
beta = numpy.add(beta, temp)
if not unrestricted:
self.fragcharges = numpy.multiply(self.fragcharges, 2)
else:
self.logger.info("Creating fragspins: array[1]")
self.fragspins = numpy.subtract(alpha, beta)
return True
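# Illustrative standalone sketch of the S^x construction used in
# calculate() above (not part of cclib's API): for a symmetric overlap
# matrix S, numpy.dot(_matrix_power_sketch(S, 0.5),
# _matrix_power_sketch(S, 0.5)) recovers S.
def _matrix_power_sketch(S, x):
    eigenvalues, U = numpy.linalg.eig(S)
    Sdiag = numpy.identity(len(S)) * numpy.power(eigenvalues, x)
    return numpy.dot(U, numpy.dot(Sdiag, U.transpose()))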
|
cclib/cclib
|
cclib/method/lpa.py
|
Python
|
bsd-3-clause
| 4,317
|
[
"cclib"
] |
9fadadaad15e779a329788b9cdd51a8b9fc08e860ec738b1e2f2408751a0ff72
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# -*- coding: iso-8859-1 -*-
"""This Python module initializes particles on the sites
of a simple cubic lattice. By setting perfect=False
the particle positions will be given random displacements
with a magnitude of one-tenth the lattice spacing."""
def createCubic(N, rho, perfect=True, RNG=None):
    if RNG is None:
import random
cubes = []
for i in range(100):
cubes.append(i**3)
if(cubes.count(N) != 1):
print '\nWARNING: num_particles is not a perfect cube. Initial'
print ' configuration may be inhomogeneous.\n'
L = (N / rho)**(1.0/3.0)
a = int(N**(1.0/3.0))
if(a**3 < N):
a = a + 1
lattice_spacing = L / a
def rnd(magn_):
        if RNG is None:
            rand = random.random()
        else:
rand = RNG()
return magn_ * (2.0 * rand - 1.0)
# magnitude of random displacements
magn = 0.0 if perfect else lattice_spacing / 10.0
ct = 0
x = []
y = []
z = []
for i in range(a):
for j in range(a):
for k in range(a):
if(ct < N):
x.append(0.5 * lattice_spacing + i * lattice_spacing + rnd(magn))
y.append(0.5 * lattice_spacing + j * lattice_spacing + rnd(magn))
z.append(0.5 * lattice_spacing + k * lattice_spacing + rnd(magn))
ct += 1
return x, y, z, L, L, L
# TODO implement checking for a wrong number of particles, slightly nonideal lattice, etc.
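# Illustrative example (not executed on import): N = 1000 at rho = 0.8
# fills a perfect 10x10x10 lattice in a cubic box with edge
# L = (N / rho)**(1.0/3.0) ~= 10.77.
#
#   x, y, z, Lx, Ly, Lz = createCubic(1000, 0.8)
#   assert len(x) == 1000 and abs(Lx - 10.772) < 1e-2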
def createDiamond(N, rho, perfect=True, RNG=None):
from espressopp import Real3D
#L = (N / 8.0 / rho)**(1.0/3.0)
L = (N / rho)**(1.0/3.0)
num_per_edge = int( (N/8.0)**(1.0/3.0) )
if(8.0*num_per_edge**3 < N):
num_per_edge = num_per_edge + 1
#print 'num_per_site= ', num_per_edge
a = L / num_per_edge
#print 'a= ', a
#print 'a1= ', (1.0 / rho)**(1.0/3.0)
pos = []
# in general structure is shifted relative to (0,0,0)
R0 = Real3D(0.125 * a, 0.125 * a, 0.125 * a)
R1 = Real3D(0.25 * a, 0.25 * a, 0.25 * a)
a11 = a * Real3D(1,0,0)
a22 = a * Real3D(0,1,0)
a33 = a * Real3D(0,0,1)
a1 = 0.5 * a * Real3D(0,1,1)
a2 = 0.5 * a * Real3D(1,0,1)
a3 = 0.5 * a * Real3D(1,1,0)
for i in range(num_per_edge):
for j in range(num_per_edge):
for k in range(num_per_edge):
Rijk = R0 + i*a11 + j*a22 + k*a33
pos.append(Rijk)
pos.append(Rijk+a1)
pos.append(Rijk+a2)
pos.append(Rijk+a3)
pos.append(Rijk+R1)
pos.append(Rijk+a1+R1)
pos.append(Rijk+a2+R1)
pos.append(Rijk+a3+R1)
'''
L1 = L-0.01
pos.append( Real3D(0.01, 0.01, 0.01) )
pos.append( Real3D(L1, 0.01, 0.01) )
pos.append( Real3D(0.01, L1, 0.01) )
pos.append( Real3D(0.01, 0.01, L1) )
pos.append( Real3D(0.01, L1, L1) )
pos.append( Real3D(L1, L1, 0.01) )
pos.append( Real3D(L1, 0.01, L1) )
pos.append( Real3D(L1, L1, L1) )
'''
return pos, L, L, L
|
capoe/espressopp.soap
|
src/tools/init_cfg/lattice.py
|
Python
|
gpl-3.0
| 3,714
|
[
"ESPResSo"
] |
717878a19175f2566947abec90694f1427c3f32318876ba482e9463d48169e4a
|
import os
from itertools import product
import csv
from django.test import SimpleTestCase
from django.conf import settings
from django.core.management import call_command
import numpy
import mock
import gjfwriter
import utils
import constants
import mol_name
import ml
import structure
import fileparser
import graph
import random_gen
from management.commands.update_ml import lock
from project.utils import StringIO
from data.models import DataPoint
# TON
NAIVE_FEATURE_VECTOR = [
1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 1, 1, 1, 1, 1
]
# TON
DECAY_FEATURE_VECTOR = [
1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1.0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1
]
# TON_2435254
DECAY_DISTANCE_CORRECTION_FEATURE_VECTOR = [
1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1.1666306666987287, 0.30601135800974139, 0,
0, 0.77061831219939703, 0.46543587033877482,
0, 0, 0.11239806193044281, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1.5558012811957163,
0.84648729633515529, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1.5558012811957163,
0.84648729633515529, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1
]
METHANE = """
C
H 1 B1
H 1 B2 2 A1
H 1 B3 3 A2 2 D1 0
H 1 B4 3 A3 2 D2 0
B1 1.07000000
B2 1.07000000
B3 1.07000000
B4 1.07000000
A1 109.47120255
A2 109.47125080
A3 109.47121829
D1 -119.99998525
D2 120.00000060
"""
METHANE_REPLACED = """
C
H 1 1.07000000
H 1 1.07000000 2 109.47120255
H 1 1.07000000 3 109.47125080 2 -119.99998525 0
H 1 1.07000000 3 109.47121829 2 120.00000060 0
"""
METHANE_CART = """
C 0.00000000 0.00000000 0.00000000
H 1.07000000 0.00000000 0.00000000
H -0.35666635 1.00880579 0.00000000
H -0.35666635 -0.50440312 0.87365131
H -0.35666686 -0.50440269 -0.87365135
"""
METHANE_ALL = """
C 0.000000 0.000000 0.000000
H 1.070000 0.000000 0.000000
H -0.356666 1.008806 0.000000
H -0.356666 -0.504403 0.873651
H -0.356667 -0.504403 -0.873651
1 2 1.0 3 1.0 4 1.0 5 1.0
2
3
4
5
"""
BENZENE = """
C
C 1 B1
C 2 B2 1 A1
C 3 B3 2 A2 1 D1 0
C 4 B4 3 A3 2 D2 0
C 1 B5 2 A4 3 D3 0
H 1 B6 6 A5 5 D4 0
H 2 B7 1 A6 6 D5 0
H 3 B8 2 A7 1 D6 0
H 4 B9 3 A8 2 D7 0
H 5 B10 4 A9 3 D8 0
H 6 B11 1 A10 2 D9 0
B1 1.39516000
B2 1.39471206
B3 1.39542701
B4 1.39482508
B5 1.39482907
B6 1.09961031
B7 1.09965530
B8 1.09968019
B9 1.09968011
B10 1.09976099
B11 1.09960403
A1 120.00863221
A2 119.99416459
A3 119.99399231
A4 119.99845680
A5 120.00431986
A6 119.98077039
A7 120.01279489
A8 119.98114211
A9 120.01134336
A10 120.00799702
D1 -0.05684321
D2 0.03411439
D3 0.03234809
D4 -179.97984142
D5 179.95324796
D6 179.96185208
D7 -179.99643617
D8 -179.99951388
D9 179.98917535
"""
BENZENE_CART = """
C 0.00000000 0.00000000 0.00000000
C 1.39516000 0.00000000 0.00000000
C 2.09269800 1.20775100 0.00000000
C 1.39504400 2.41626000 0.00119900
C 0.00022024 2.41618128 0.00311654
C -0.69738200 1.20797600 0.00068200
H -0.54975851 -0.95231608 -0.00158377
H 1.94466800 -0.95251300 -0.00131500
H 3.19237800 1.20783100 -0.00063400
H 1.94524390 3.36840306 0.00113950
H -0.54990178 3.36846229 0.00405378
H -1.79698600 1.20815900 0.00086200
"""
# A_TON_A_A
STRUCTURE_GJF = """
C -0.022105 -0.036359 -0.000155
C 1.392147 -0.046153 -0.000105
C -0.814778 1.099625 -0.000156
C 2.129327 1.141675 -0.000052
C -0.077598 2.287453 -0.000108
C 1.336654 2.277659 -0.000059
C 0.735390 -2.059327 -0.000187
C 0.579160 4.300627 -0.000078
O 1.749247 3.590614 -0.000001
O -0.434697 -1.349315 -0.000186
N -0.513273 3.616121 -0.000069
N 1.827823 -1.374820 -0.000104
H -1.897987 1.079613 -0.000185
H 3.212537 1.161687 -0.000006
H 0.629492 -3.134847 -0.000207
H 0.685059 5.376147 -0.000043
1 2 1.5 3 1.5 10 1.0
2 4 1.5 12 1.0
3 5 1.5 13 1.0
4 6 1.5 14 1.0
5 6 1.5 11 1.0
6 9 1.0
7 10 1.0 12 2.0 15 1.0
8 9 1.0 11 2.0 16 1.0
9
10
11
12
13
14
15
16"""
METHANE_FREEZE = """
%chk=t.chk
# hf/3-21g geom=(modredundant,connectivity)
Title Card Required
0 1
C
H 1 B1
H 1 B2 2 A1
H 1 B3 3 A2 2 D1 0
H 1 B4 3 A3 2 D2 0
B1 1.07000000
B2 1.07000000
B3 1.07000000
B4 1.07000000
A1 109.47120255
A2 109.47125080
A3 109.47121829
D1 -119.99998525
D2 120.00000060
1 2 1.0 3 1.0 4 1.0 5 1.0
2
3
4
5
B 5 1 F
"""
METHANE_FREEZE2 = """
%chk=t.chk
# hf/3-21g geom=(modredundant,connectivity)
Title Card Required
0 1
C
H 1 1.07000000
H 1 1.07000000 2 109.47120255
H 1 1.07000000 3 109.47125080 2 -119.99998525 0
H 1 1.07000000 3 109.47121829 2 120.00000060 0
1 2 1.0 3 1.0 4 1.0 5 1.0
2
3
4
5
B 5 1 F
"""
# hashlib.sha224(string).hexdigest()
PNG_HASH = "4cbf2c82970819ccbe66025fdbc627171af31571c96e06323a98c945"
SVG_HASH = "c095979c874d01bd997ac6435b9e72a74e510060aad2df7ca4d58c1d"
DATA_POINT = {
"name": "A_TON_A_A",
"exact_name": "A_TON_A_A_n1_m1_x1_y1_z1",
"options": "td B3LYP/6-31g(d) geom=connectivity",
"homo": -6.460873931,
"lumo": -1.31976745,
"homo_orbital": 41,
"dipole": 0.0006,
"energy": -567.1965205,
"band_gap": 4.8068,
}
def row_select(row):
return row[1:4] + row[5:]
path = os.path.join(settings.MEDIA_ROOT, "tests", "results.csv")
with open(path, 'r') as f:
reader = csv.reader(f, delimiter=',', quotechar='"')
LOG_DATA = {}
for i, row in enumerate(reader):
if not row:
continue
if not i:
key = 'header'
else:
key = row[0]
if key in LOG_DATA:
LOG_DATA[key] = [LOG_DATA[key],
row_select(row)]
else:
LOG_DATA[key] = row_select(row)
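# Resulting layout (illustrative): LOG_DATA['header'] holds the trimmed
# header row, every other key maps to its trimmed data row, and a name
# appearing twice in results.csv maps to a nested [first_row, second_row]
# pair instead of a flat row.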
class StructureTestCase(SimpleTestCase):
templates = [
"{0}_TON",
"CON_{0}",
"TON_{0}_",
"{0}_TPN_{0}",
"{0}_TNN_{0}_",
"CPP_{0}_{0}",
"{0}_TON_{0}_{0}",
"{0}",
]
cores = constants.CORES
invalid_cores = ["cao", "bo", "CONA", "asD"]
valid_polymer_sides = ['2', '4b', '22', '24', '4bc', '44bc', '4b4',
'5-', '5-5', '55-', '5-a', '5-ab4-', '4b114b']
invalid_polymer_sides = ['B', '2B']
valid_sides = valid_polymer_sides + invalid_polymer_sides
invalid_sides = ['~', 'b', 'c', 'BB', 'TON', 'Dc', '4aaa',
'24C2', 'awr', 'A-', '5B-', '2a', '4abc']
valid_polymer_options = ['_n1', '_n2', '_n3',
'_m1', '_m2', '_m3',
'_n1_m1']
invalid_polymer_options = ['_n2_m2', '_n3_m3', '_m2_n2', '_m3_n3',
'_n0', '_m0', '_n0_m0']
def test_atom_print(self):
atom = structure.Atom(0, 0, 0, "C")
self.assertEqual(str(atom), "C 0.000000 0.000000 0.000000")
def test_atom_json_property(self):
ele, x, y, z = ('C', 0.0, 0.0, 0.0)
atom = structure.Atom(x, y, z, ele)
data = {
"element": ele,
"x": x,
"y": y,
"z": z,
}
self.assertEqual(atom.json, data)
def test_get_mass(self):
struct = structure.from_name("TON")
result = struct.get_mass()
self.assertAlmostEqual(result, 160.1316)
def test_draw_no_hydrogen(self):
struct = structure.from_name("TON")
struct.draw(10, hydrogens=False)
def test_draw_no_fancy_bonds(self):
struct = structure.from_name("TON")
struct.draw(10, fancy_bonds=False)
def test_get_center(self):
struct = structure.from_name("TON")
result = struct.get_center()
expected = numpy.array([[0.657275, 1.12065, -0.00013125]]).T
self.assertTrue(numpy.allclose(result, expected))
def test_get_mass_center(self):
struct = structure.from_name("TON")
result = struct.get_mass_center()
expected = numpy.array([[
0.657283740998029,
1.12065,
-0.00011002400525567719
]]).T
self.assertTrue(numpy.allclose(result, expected))
def test_get_moment_of_inertia(self):
struct = structure.from_name("TON")
direction = numpy.array([[0, 1, 0]]).T
offset = numpy.array([[0, 0, 0]]).T
result = struct.get_moment_of_inertia(direction=direction,
offset=offset)
self.assertAlmostEqual(result, 239.74162427124799)
def test_get_moment_of_inertia_no_direction(self):
struct = structure.from_name("TON")
offset = numpy.array([[100, 0, 0]]).T
result = struct.get_moment_of_inertia(offset=offset)
self.assertAlmostEqual(result, 1581424.2246356755)
def test_get_moment_of_inertia_no_offset(self):
struct = structure.from_name("TON")
direction = numpy.array([[0, 1, 0]]).T
result = struct.get_moment_of_inertia(direction=direction)
self.assertAlmostEqual(result, 170.56126165978225)
def test_from_data_invalid(self):
with self.assertRaises(Exception):
structure.from_data("filename")
def test_from_gjf(self):
path = os.path.join(settings.MEDIA_ROOT, "tests", "A_TON_A_A.gjf")
s = structure.from_gjf(open(path, 'r'))
self.assertEqual(
[x.strip() for x in s.gjf.split()],
[x.strip() for x in STRUCTURE_GJF.split()])
def test_from_log(self):
path = os.path.join(settings.MEDIA_ROOT, "tests", "A_TON_A_A.log")
s = structure.from_log(open(path, 'r'))
self.assertIn("C -0.022105 -0.036359 -0.000155", s.gjf)
def test_from_gjf_no_bonds(self):
string = "%chk=chk.chk\n# hf\n\nTitle\n\n0 1" + METHANE_REPLACED
f = StringIO(string)
s = structure.from_gjf(f)
self.assertEqual(
[x.strip() for x in s.gjf.split()],
[x.strip() for x in METHANE_ALL.split()])
def test_from_gjf_invalid_header(self):
string = "%chk=c#hk.chk\nasd\n\nTitle\n\n0 1" + METHANE_REPLACED
f = StringIO(string)
with self.assertRaises(Exception):
structure.from_gjf(f)
def test_from_gjf_invalid_sections(self):
string = "%chk=chk.chk\n# hf geom=(connectivity,modredundant)\n\nTitle\n\n0 1"
f = StringIO(string)
with self.assertRaises(Exception):
structure.from_gjf(f)
def test_from_gjf_bonds(self):
string = "%chk=chk.chk\n# hf geom=connectivity\n\nTitle\n\n0 1" + \
STRUCTURE_GJF
f = StringIO(string)
s = structure.from_gjf(f)
self.assertEqual(
[x.strip() for x in s.gjf.split()],
[x.strip() for x in STRUCTURE_GJF.split()])
# This is the same as test_from_gjf_zmatrix
#def test_from_gjf_parameters(self):
# string = "%chk=chk.chk\n# hf\n\nTitle\n\n0 1" + METHANE
# f = StringIO(string)
# s = structure.from_gjf(f)
# self.assertEqual(
# [x.strip() for x in s.gjf.split()],
# [x.strip() for x in METHANE_ALL.split()])
def test_from_gjf_zmatrix(self):
string = "%chk=chk.chk\n# hf\n\nTitle\n\n0 1" + METHANE
f = StringIO(string)
s = structure.from_gjf(f)
self.assertEqual(
[x.strip() for x in s.gjf.split()],
[x.strip() for x in METHANE_ALL.split()])
def test_from_gjf_redundant(self):
string = METHANE_FREEZE
f = StringIO(string)
s = structure.from_gjf(f)
self.assertEqual(
[x.strip() for x in s.gjf.split()],
[x.strip() for x in METHANE_ALL.split()])
def test_from_gjf_redundant_no_parameters(self):
string = METHANE_FREEZE2
f = StringIO(string)
s = structure.from_gjf(f)
self.assertEqual(
[x.strip() for x in s.gjf.split()],
[x.strip() for x in METHANE_ALL.split()])
def test_from_gjf_too_many_first(self):
string = METHANE_FREEZE.replace("modredundant", "") + METHANE
f = StringIO(string)
with self.assertRaises(Exception):
structure.from_gjf(f)
def test_cores(self):
for core in self.cores:
structure.from_name(core)
def test_invalid_cores(self):
for core in self.invalid_cores:
try:
structure.from_name(core)
self.fail(core)
            except Exception:
pass
def test_sides(self):
sets = [
self.templates,
self.valid_sides,
]
for template, group in product(*sets):
name = template.format(group)
structure.from_name(name)
# def test_single_side_reduction(self):
# sets = [
# ['2', '23', '4aa', '6cc4bb'],
# [2, 3, 4],
# ]
# for group, num in product(*sets):
# exact_name = mol_name.get_exact_name(group * num)
# expected = group + "_n%d_m1_x1_y1_z1" % num
# self.assertEqual(exact_name, expected)
def test_invalid_sides(self):
sets = [
self.templates,
self.invalid_sides,
]
for template, group in product(*sets):
name = template.format(group)
try:
structure.from_name(name)
if group != "TON":
self.fail(name)
except Exception:
pass
def test_polymer(self):
sets = [
self.templates,
self.valid_polymer_sides,
self.valid_polymer_options
]
for template, group, option in product(*sets):
if template == '{0}' and option.startswith('_m'):
continue
name = template.format(group) + option
structure.from_name(name)
def test_invalid_polymer(self):
sets = [
self.templates,
self.valid_sides,
self.invalid_polymer_options
]
for template, group, option in product(*sets):
name = template.format(group) + option
try:
structure.from_name(name)
self.fail(name)
except Exception:
pass
def test_single_axis_expand(self):
sets = [
self.valid_sides,
['x', 'y', 'z'],
['1', '2', '3']
]
for group, axis, num in product(*sets):
name = self.templates[0].format(group) + '_' + axis + num
structure.from_name(name)
def test_multi_axis_expand(self):
sets = [
self.valid_sides,
['_x1', '_x2', '_x3'],
['_y1', '_y2', '_y3'],
['_z1', '_z2', '_z3'],
]
for group, x, y, z in product(*sets):
            name = self.templates[0].format(group) + x + y + z
structure.from_name(name)
def test_manual_polymer(self):
sets = [
self.templates[1:-1],
self.valid_polymer_sides,
[2, 3, 4],
]
for template, group, num in product(*sets):
name = '_'.join([template.format(group)] * num)
structure.from_name(name)
def test_invalid_manual_polymer(self):
sets = [
self.templates,
self.invalid_polymer_sides,
[2, 3, 4],
]
for template, group, num in product(*sets):
name = '_'.join([template.format(group)] * num)
try:
structure.from_name(name)
if "__" in name:
continue
if any(x.endswith("B") for x in name.split("_TON_")):
continue
self.fail(name)
except Exception:
pass
def test_spot_check(self):
names = [
'5ba_TON_5ba55_TON_345495_2_TON_n6',
'24a_TON_35b_24c',
'TON_24a_24a',
'24a_TON_24a',
'24a_TON',
'4a_TON_n2',
'4a_TON_B_24c_n3',
'4a_TON_35_2_m3',
'TON_24a_24a_TON',
'TON_24a__TON',
'TON__24a_TON',
'4a_TON_5555555555_4a',
'5_TON_n13',
]
for name in names:
structure.from_name(name)
def test_spot_check_invalid(self):
pairs = [
("B_TON_n2",
"(9, 'can not do nm expansion with xgroup on left')"),
("TON_B__m2",
"(9, 'can not do nm expansion with xgroup on middle')"),
("TON__B_n2",
"(9, 'can not do nm expansion with xgroup on right')"),
("TON_TON_m2",
"(8, 'Can not do m expansion and have multiple cores')"),
("TON__B_TON",
"(11, 'can not add core to xgroup on right')")
]
for name, message in pairs:
try:
structure.from_name(name)
self.fail((name, message))
except Exception as e:
self.assertEqual(message, str(e))
class NamedMoleculeTestCase(SimpleTestCase):
templates = [
"{0}_TON",
"CON_{0}",
"TON_{0}_",
"{0}_TPN_{0}",
"{0}_TNN_{0}_",
"CPP_{0}_{0}",
"{0}_TON_{0}_{0}",
"{0}",
]
valid_polymer_sides = ['2', '4b', '22', '24', '4bc', '44bc', '4b4',
'5-', '5-5', '55-', '5-a', '5-ab4-', '4b114b', '3',
'11', '4(25)', '4(25)4']
invalid_polymer_sides = ['B', '2B']
valid_sides = valid_polymer_sides + invalid_polymer_sides
def test_png(self):
sets = [
self.templates,
self.valid_sides,
]
for template, group in product(*sets):
name = template.format(group)
obj = gjfwriter.NamedMolecule(name)
obj.get_png()
def test_svg(self):
sets = [
self.templates,
self.valid_sides,
]
for template, group in product(*sets):
name = template.format(group)
obj = gjfwriter.NamedMolecule(name)
obj.get_svg()
def test_gjf(self):
sets = [
self.templates,
self.valid_sides,
]
for template, group in product(*sets):
name = template.format(group)
obj = gjfwriter.NamedMolecule(name)
obj.get_gjf()
def test_multistep_gjf(self):
sets = [
self.templates,
self.valid_sides,
]
for template, group in product(*sets):
name = template.format(group)
keywords = ["opt b3lyp/6-31g(d,p)", "td b3lyp/6-31g(d,p)"]
obj = gjfwriter.NamedMolecule(name, keywords=keywords)
text = obj.get_gjf()
for i, key in enumerate(keywords):
self.assertIn(key, text)
if i:
self.assertIn("--Link1--", text)
def test_mol2(self):
sets = [
self.templates,
self.valid_sides,
]
for template, group in product(*sets):
name = template.format(group)
obj = gjfwriter.NamedMolecule(name)
obj.get_mol2()
def test_get_exact_name(self):
obj = gjfwriter.NamedMolecule("TON")
value = obj.get_exact_name()
self.assertEqual(value, "A_TON_A_A_n1_m1_x1_y1_z1")
def test_get_exact_name_spacer(self):
obj = gjfwriter.NamedMolecule("TON")
value = obj.get_exact_name(spacers=True)
self.assertEqual(value, "A**_TON_A**_A**_n1_m1_x1_y1_z1")
def test_get_binary_feature_vector(self):
obj = gjfwriter.NamedMolecule("TON")
value = obj.get_binary_feature_vector()
self.assertEqual(value, NAIVE_FEATURE_VECTOR)
def test_get_decay_feature_vector(self):
obj = gjfwriter.NamedMolecule("TON")
value = obj.get_decay_feature_vector()
self.assertEqual(value, DECAY_FEATURE_VECTOR)
def test_get_decay_distance_correction_feature_vector(self):
obj = gjfwriter.NamedMolecule("A_TON_2435254A_A_n1_m1_x1_y1_z1")
value = obj.get_decay_distance_correction_feature_vector()
self.assertEqual(value, DECAY_DISTANCE_CORRECTION_FEATURE_VECTOR)
def test_get_element_counts(self):
obj = gjfwriter.NamedMolecule("TON")
value = obj.get_element_counts()
expected = {'C': 8, 'H': 4, 'N': 2, 'O': 2}
self.assertEqual(value, expected)
def test_get_formula(self):
obj = gjfwriter.NamedMolecule("TON")
value = obj.get_formula()
expected = 'C8H4N2O2'
self.assertEqual(value, expected)
# def test_get_png_data_url(self):
# obj = gjfwriter.NamedMolecule("TON")
# string = obj.get_png_data_url()
# self.assertEqual(PNG_HASH, hashlib.sha224(string).hexdigest())
# def test_get_svg_data_url(self):
# obj = gjfwriter.NamedMolecule("TON")
# string = obj.get_svg_data_url()
# self.assertEqual(SVG_HASH, hashlib.sha224(string).hexdigest())
def test_get_property_limits(self):
expected = {
'm': [-5.5421310841370435, -2.4789919135053662, 2.8719047861895461],
'n': [-5.785486263321105, -2.8531794442346685, 2.8173259725302477],
}
obj = gjfwriter.NamedMolecule("24b_TON")
results = obj.get_property_limits()
self.assertEqual(expected, results)
def test_get_property_limits_polymer(self):
expected = {
'm': [None, None, None],
'n': [-5.785486263321105, -2.8531794442346685, 2.8173259725302477]
}
obj = gjfwriter.NamedMolecule("24b_TON_n2")
results = obj.get_property_limits()
self.assertEqual(expected, results)
def test_autoflip_name(self):
names = (
("5555", "55-55-"),
("4444", "4444"),
("4545", "4545-"),
("TON_5555", "TON_55-55-"),
)
for initial, expected in names:
obj = gjfwriter.NamedMolecule(initial, autoflip=True)
self.assertEqual(obj.name, expected)
def test_perturb_struct(self):
name = "4444"
obj1 = gjfwriter.NamedMolecule(name)
obj2 = gjfwriter.NamedMolecule(name, perturb=1.0)
obj3 = gjfwriter.NamedMolecule(name, perturb=0.0)
diff12 = []
diff13 = []
for atom1, atom2, atom3 in zip(obj1.structure.atoms,
obj2.structure.atoms,
obj3.structure.atoms):
diff12.append(numpy.linalg.norm(atom1.xyz - atom2.xyz))
diff13.append(numpy.linalg.norm(atom1.xyz - atom3.xyz))
eps = 1e-3
self.assertTrue(sum(diff12) > eps)
self.assertTrue(sum(diff13) < eps)
class MolNameTestCase(SimpleTestCase):
pairs = [
('234', '2**3**4aaA**'),
('10234', '10**2**3**4aaA**'),
('1110234', '11**10**2**3**4aaA**'),
('TON', 'A**_TON_A**_A**'),
('2_TON', '2**A**_TON_A**_A**'),
('2-_TON', '2**-A**_TON_A**_A**'),
('4_TON', '4aaA**_TON_A**_A**'),
('4b_TON', '4bbA**_TON_A**_A**'),
('4bc_TON', '4bcA**_TON_A**_A**'),
('44bc_TON', '4aa4bcA**_TON_A**_A**'),
('TON_2', 'A**_TON_A**_2**A**'),
('TON_4', 'A**_TON_A**_4aaA**'),
('TON_4b', 'A**_TON_A**_4bbA**'),
('TON_4bc', 'A**_TON_A**_4bcA**'),
('TON_44bc', 'A**_TON_A**_4aa4bcA**'),
('TON_2_', 'A**_TON_2**A**_A**'),
('TON_4_', 'A**_TON_4aaA**_A**'),
('TON_4b_', 'A**_TON_4bbA**_A**'),
('TON_4bc_', 'A**_TON_4bcA**_A**'),
('TON_44bc_', 'A**_TON_4aa4bcA**_A**'),
('TON_2_TON_2', 'A**_TON_A**_2**_TON_A**_2**A**'),
('TON_4_TON_4', 'A**_TON_A**_4aa_TON_A**_4aaA**'),
('TON_4b_TON_4b', 'A**_TON_A**_4bb_TON_A**_4bbA**'),
('TON_4bc_TON_4bc', 'A**_TON_A**_4bc_TON_A**_4bcA**'),
('TON_44bc_TON_44bc', 'A**_TON_A**_4aa4bc_TON_A**_4aa4bcA**'),
('TON_2_TON_2_TON_2',
'A**_TON_A**_2**_TON_A**_2**_TON_A**_2**A**'),
('TON_4_TON_4_TON_4',
'A**_TON_A**_4aa_TON_A**_4aa_TON_A**_4aaA**'),
('TON_4b_TON_4b_TON_4b',
'A**_TON_A**_4bb_TON_A**_4bb_TON_A**_4bbA**'),
('TON_4bc_TON_4bc_TON_4bc',
'A**_TON_A**_4bc_TON_A**_4bc_TON_A**_4bcA**'),
('TON_44bc_TON_44bc_TON_44bc',
'A**_TON_A**_4aa4bc_TON_A**_4aa4bc_TON_A**_4aa4bcA**'),
('TON_2__TON_2_', 'A**_TON_2**A**__TON_2**A**_A**'),
('TON_4__TON_4_', 'A**_TON_4aaA**__TON_4aaA**_A**'),
('TON_4b__TON_4b_', 'A**_TON_4bbA**__TON_4bbA**_A**'),
('TON_4bc__TON_4bc_', 'A**_TON_4bcA**__TON_4bcA**_A**'),
('TON_44bc__TON_44bc_', 'A**_TON_4aa4bcA**__TON_4aa4bcA**_A**'),
]
polymer_pairs = [
('TON_n2', '_TON_A**__n2_m1'),
('2_TON_n2', '2**_TON_A**__n2_m1'),
('4_TON_n2', '4aa_TON_A**__n2_m1'),
('4b_TON_n2', '4bb_TON_A**__n2_m1'),
('4bc_TON_n2', '4bc_TON_A**__n2_m1'),
('44bc_TON_n2', '4aa4bc_TON_A**__n2_m1'),
('TON_2_n2', '_TON_A**_2**_n2_m1'),
('TON_4_n2', '_TON_A**_4aa_n2_m1'),
('TON_4b_n2', '_TON_A**_4bb_n2_m1'),
('TON_4bc_n2', '_TON_A**_4bc_n2_m1'),
('TON_44bc_n2', '_TON_A**_4aa4bc_n2_m1'),
('TON_2__n2', '_TON_2**A**__n2_m1'),
('TON_4__n2', '_TON_4aaA**__n2_m1'),
('TON_4b__n2', '_TON_4bbA**__n2_m1'),
('TON_4bc__n2', '_TON_4bcA**__n2_m1'),
('TON_44bc__n2', '_TON_4aa4bcA**__n2_m1'),
('TON_2_TON_2_n2', '_TON_A**_2**_TON_A**_2**_n2_m1'),
('TON_4_TON_4_n2', '_TON_A**_4aa_TON_A**_4aa_n2_m1'),
('TON_4b_TON_4b_n2', '_TON_A**_4bb_TON_A**_4bb_n2_m1'),
('TON_4bc_TON_4bc_n2', '_TON_A**_4bc_TON_A**_4bc_n2_m1'),
('TON_44bc_TON_44bc_n2', '_TON_A**_4aa4bc_TON_A**_4aa4bc_n2_m1'),
('TON_2_TON_2_TON_2_n2',
'_TON_A**_2**_TON_A**_2**_TON_A**_2**_n2_m1'),
('TON_4_TON_4_TON_4_n2',
'_TON_A**_4aa_TON_A**_4aa_TON_A**_4aa_n2_m1'),
('TON_4b_TON_4b_TON_4b_n2',
'_TON_A**_4bb_TON_A**_4bb_TON_A**_4bb_n2_m1'),
('TON_4bc_TON_4bc_TON_4bc_n2',
'_TON_A**_4bc_TON_A**_4bc_TON_A**_4bc_n2_m1'),
('TON_44bc_TON_44bc_TON_44bc_n2',
'_TON_A**_4aa4bc_TON_A**_4aa4bc_TON_A**_4aa4bc_n2_m1'),
('TON_2__TON_2__n2', '_TON_2**A**__TON_2**A**__n2_m1'),
('TON_4__TON_4__n2', '_TON_4aaA**__TON_4aaA**__n2_m1'),
('TON_4b__TON_4b__n2', '_TON_4bbA**__TON_4bbA**__n2_m1'),
('TON_4bc__TON_4bc__n2', '_TON_4bcA**__TON_4bcA**__n2_m1'),
('TON_44bc__TON_44bc__n2', '_TON_4aa4bcA**__TON_4aa4bcA**__n2_m1'),
('TON_m2', 'A**_TON__A**_n1_m2'),
('2_TON_m2', '2**A**_TON__A**_n1_m2'),
('4_TON_m2', '4aaA**_TON__A**_n1_m2'),
('4b_TON_m2', '4bbA**_TON__A**_n1_m2'),
('4bc_TON_m2', '4bcA**_TON__A**_n1_m2'),
('44bc_TON_m2', '4aa4bcA**_TON__A**_n1_m2'),
('TON_2_m2', 'A**_TON__2**A**_n1_m2'),
('TON_4_m2', 'A**_TON__4aaA**_n1_m2'),
('TON_4b_m2', 'A**_TON__4bbA**_n1_m2'),
('TON_4bc_m2', 'A**_TON__4bcA**_n1_m2'),
('TON_44bc_m2', 'A**_TON__4aa4bcA**_n1_m2'),
('TON_2__m2', 'A**_TON_2**_A**_n1_m2'),
('TON_4__m2', 'A**_TON_4aa_A**_n1_m2'),
('TON_4b__m2', 'A**_TON_4bb_A**_n1_m2'),
('TON_4bc__m2', 'A**_TON_4bc_A**_n1_m2'),
('TON_44bc__m2', 'A**_TON_4aa4bc_A**_n1_m2'),
('TON__4(20)', 'A**_TON_A**_4(20)aaA**_n1_m1'),
]
def test_brace_expansion(self):
names = [
("a", ["a"]),
("{,a}", ["", "a"]),
("{a,b}", ["a", "b"]),
("{a,b}c", ["ac", "bc"]),
("c{a,b}", ["ca", "cb"]),
("{a,b}{c}", ["ac", "bc"]),
("{c}{a,b}", ["ca", "cb"]),
("{a,b}{c,d}", ["ac", "bc", "ad", "bd"]),
("e{a,b}{c,d}", ["eac", "ebc", "ead", "ebd"]),
("{a,b}e{c,d}", ["aec", "bec", "aed", "bed"]),
("{a,b}{c,d}e", ["ace", "bce", "ade", "bde"]),
("{a,b}{c,d}{e,f}", ["ace", "acf", "ade", "adf",
"bce", "bcf", "bde", "bdf"]),
]
for name, result in names:
self.assertEqual(set(mol_name.name_expansion(name)), set(result))
def test_comma_name_split(self):
names = [
("a,", ["a", ""]),
(",b", ["", "b"]),
("a,b", ["a", "b"]),
]
for name, result in names:
self.assertEqual(set(mol_name.name_expansion(name)), set(result))
def test_group_expansion(self):
names = [
("{$CORES}", constants.CORES),
("{$XGROUPS}", constants.XGROUPS),
("{$RGROUPS}", constants.RGROUPS),
("{$ARYL0}", constants.ARYL0),
("{$ARYL2}", constants.ARYL2),
("{$ARYL}", constants.ARYL),
("{$a}", ['']),
("{$a,$ARYL}", [''] + constants.ARYL),
]
for name, result in names:
self.assertEqual(set(mol_name.name_expansion(name)), set(result))
def test_local_vars(self):
names = [
("{a,b}{$0}", ["aa", "bb"]),
("{a,b}{$0}{$0}", ["aaa", "bbb"]),
("{a,b}{c,d}{$0}{$0}", ["acaa", "bcbb", "adaa", "bdbb"]),
("{a,b}{c,d}{$1}{$1}", ["accc", "bccc", "addd", "bddd"]),
("{a,b}{c,d}{$0}{$1}", ["acac", "bcbc", "adad", "bdbd"]),
]
for name, result in names:
self.assertEqual(set(mol_name.name_expansion(name)), set(result))
def test_name_expansion(self):
names = [
("24{$RGROUPS}_{$CORES}",
["24" + '_'.join(x) for x in product(constants.RGROUPS,
constants.CORES)]),
("24{$XGROUPS}_{$CORES}",
["24" + '_'.join(x) for x in product(constants.XGROUPS,
constants.CORES)]),
("24{$ARYL}_{$CORES}",
["24" + '_'.join(x) for x in product(constants.ARYL,
constants.CORES)]),
]
for name, result in names:
self.assertEqual(set(mol_name.name_expansion(name)), set(result))
def test_local_vars_case(self):
names = [
("{a,b}{$0.U}", ["aA", "bB"]),
("{a,b}{$0.U}{$0}", ["aAa", "bBb"]),
("{a,b}{c,d}{$0.U}{$0.U}", ["acAA", "bcBB", "adAA", "bdBB"]),
("{a,b}{c,d}{$1}{$1.U}", ["accC", "bccC", "addD", "bddD"]),
("{a,b}{c,d}{$0.U}{$1.U}", ["acAC", "bcBC", "adAD", "bdBD"]),
("{A,B}{$0.L}", ["Aa", "Bb"]),
("{A,B}{$0.L}{$0}", ["AaA", "BbB"]),
("{A,B}{C,D}{$0.L}{$0.L}", ["ACaa", "BCbb", "ADaa", "BDbb"]),
("{A,B}{C,D}{$1}{$1.L}", ["ACCc", "BCCc", "ADDd", "BDDd"]),
("{A,B}{C,D}{$0.L}{$1.L}", ["ACac", "BCbc", "ADad", "BDbd"]),
]
for name, result in names:
self.assertEqual(set(mol_name.name_expansion(name)), set(result))
def test_get_exact_name(self):
for name, expected in self.pairs:
a = mol_name.get_exact_name(name)
expected = expected + "_n1_m1_x1_y1_z1"
self.assertEqual(a, expected.replace('*', ''))
def test_get_exact_name_polymer(self):
for name, expected in self.polymer_pairs:
a = mol_name.get_exact_name(name)
expected = expected + "_x1_y1_z1"
self.assertEqual(a, expected.replace('*', ''))
def test_get_exact_name_spacers(self):
for name, expected in self.pairs:
a = mol_name.get_exact_name(name, spacers=True)
expected = expected + "_n1_m1_x1_y1_z1"
self.assertEqual(a, expected)
def test_get_exact_name_polymer_spacers(self):
for name, expected in self.polymer_pairs:
a = mol_name.get_exact_name(name, spacers=True)
expected = expected + "_x1_y1_z1"
self.assertEqual(a, expected)
def test_get_structure_type(self):
tests = [
("TON", constants.BENZO_TWO),
("EON", constants.BENZO_ONE),
("444", constants.CHAIN),
("TON_TON", constants.BENZO_MULTI),
]
for name, expected in tests:
res = mol_name.get_structure_type(name)
self.assertEqual(res, expected)
class ExtractorTestCase(SimpleTestCase):
def test_extractor_command(self):
call_command("extract")
class UpdateMLTestCase(SimpleTestCase):
def setUp(self):
DataPoint(**DATA_POINT).save()
@mock.patch('os.remove')
def test_lock(self, mock_remove):
@lock
def test_function(x):
return x + x
mock_open = mock.mock_open()
with mock.patch('chemtools.management.commands.update_ml.open',
mock_open, create=True):
ret = test_function(1)
self.assertEqual(ret, 2)
self.assertEqual(mock_remove.call_args[0], ('.updating_ml', ))
self.assertEqual(mock_open.call_args[0], ('.updating_ml', 'w'))
@mock.patch('os.path.exists', return_value=True)
def test_lock_exists(self, mock_exists):
@lock
def test_function(x):
return x + x
self.assertIsNone(test_function(1))
@mock.patch('os.remove')
def test_lock_exception(self, mock_remove):
@lock
def test_function(x):
raise ValueError('some error')
mock_open = mock.mock_open()
with mock.patch('chemtools.management.commands.update_ml.open',
mock_open, create=True):
self.assertIsNone(test_function(1))
self.assertEqual(len(mock_remove.mock_calls), 1)
@mock.patch('data.models.Predictor.save')
@mock.patch('data.models.DataPoint.get_all_data')
def test_update_ml(self, mock_get_all_data, mock_save):
X = numpy.random.rand(10, 2)
HOMO = numpy.random.rand(10, 1)
LUMO = numpy.random.rand(10, 1)
GAP = numpy.random.rand(10, 1)
mock_get_all_data.return_value = X, HOMO, LUMO, GAP
call_command("update_ml")
class Model(object):
def __init__(self):
self.weights = None
def get_params(self, *args, **kwargs):
return {}
def fit(self, X, y):
self.weights = numpy.ones(X.shape[1])
def predict(self, X):
return X.dot(self.weights)
class MLTestCase(SimpleTestCase):
def test_get_core_features(self):
cores = [
("TON", [1, 1, 0, 0, 0, 0, 1, 0, 0]),
("CON", [0, 1, 0, 0, 0, 0, 1, 0, 0]),
("COP", [0, 1, 0, 0, 0, 0, 0, 1, 0]),
("COC", [0, 1, 0, 0, 0, 0, 0, 0, 1]),
("CSC", [0, 0, 1, 0, 0, 0, 0, 0, 1]),
("CNC", [0, 0, 0, 1, 0, 0, 0, 0, 1]),
("CPC", [0, 0, 0, 0, 1, 0, 0, 0, 1]),
("CCC", [0, 0, 0, 0, 0, 1, 0, 0, 1]),
]
for core, expected in cores:
vector = ml.get_core_features(core)
self.assertEqual(vector, expected)
def test_get_extra_features(self):
values = [0, 2, 12]
names = "nmxyz"
for numbers in product(values, values, values, values, values):
use = [n + str(v) for n, v in zip(names, numbers)]
vector = ml.get_extra_features(*use)
self.assertEqual(vector, list(numbers))
def test_get_binary_feature_vector(self):
name = "A**_TON_A**_A**_n1_m1_x1_y1_z1"
self.assertEqual(ml.get_binary_feature_vector(name),
NAIVE_FEATURE_VECTOR)
def test_get_decay_feature_vector(self):
name = "A**_TON_A**_A**_n1_m1_x1_y1_z1"
self.assertEqual(ml.get_decay_feature_vector(name),
DECAY_FEATURE_VECTOR)
def test_get_decay_distance_correction_feature_vector(self):
name = "A**_TON_2**4aa3**5aa2**5aa4aaA**_A**_n1_m1_x1_y1_z1"
self.assertEqual(ml.get_decay_distance_correction_feature_vector(name),
DECAY_DISTANCE_CORRECTION_FEATURE_VECTOR)
def test_MultiStageRegression(self):
n = 5
m = 2
p = 3
res = m * p
X = numpy.ones((n, m)) + numpy.arange(n).reshape(-1, 1)
y = numpy.zeros((n, p))
m = ml.MultiStageRegression(model=Model())
m.fit(X, y)
col = numpy.arange(res, res * n + 1, res)
expected = numpy.tile(col, (p, 1)).T
self.assertTrue(numpy.allclose(m.predict(X), expected))
class FileParserTestCase(SimpleTestCase):
def test_parse_files(self):
base = os.path.join(settings.MEDIA_ROOT, "tests")
files = [
"A_TON_A_A.log",
"A_TON_A_A_TD.log",
"A_CON_A_A_TDDFT.log",
"crazy.log",
]
paths = [os.path.join(base, x) for x in files]
logset = fileparser.LogSet()
logset.parse_files(paths)
with StringIO(logset.format_output(errors=False)) as f:
reader = csv.reader(f, delimiter=',', quotechar='"')
expected = [LOG_DATA[x] for x in files]
lines = [row_select(x) for i, x in enumerate(reader) if i]
self.assertEqual(expected, lines)
def test_parse_logs_no_logs(self):
logset = fileparser.LogSet()
logset.parse_files([])
self.assertEqual("\n\n", logset.format_output(errors=False))
def test_format_header(self):
path = os.path.join(settings.MEDIA_ROOT, "tests", "A_TON_A_A.log")
log = fileparser.Log(path)
expected = LOG_DATA['header']
value = log.format_header().split(',')
self.assertEqual(expected, row_select(value))
def test_parse_log_open(self):
name = "A_TON_A_A.log"
path = os.path.join(settings.MEDIA_ROOT, "tests", name)
log = fileparser.Log(path)
with StringIO(log.format_data()) as f:
reader = csv.reader(f, delimiter=',', quotechar='"')
expected = [LOG_DATA[name]]
lines = [row_select(x) for x in reader]
self.assertEqual(expected, lines)
def test_parse_invalid_log(self):
name = "invalid.log"
path = os.path.join(settings.MEDIA_ROOT, "tests", name)
log = fileparser.Log(path)
with StringIO(log.format_data()) as f:
reader = csv.reader(f, delimiter=',', quotechar='"')
expected = [LOG_DATA[name]]
lines = [row_select(x) for x in reader]
self.assertEqual(expected, lines)
with self.assertRaises(Exception):
log.format_gjf()
def test_parse_nonbenzo(self):
name = '1_04_0.log'
path = os.path.join(settings.MEDIA_ROOT, "tests", name)
log = fileparser.Log(path)
with StringIO(log.format_data()) as f:
reader = csv.reader(f, delimiter=',', quotechar='"')
expected = LOG_DATA[name]
for line in reader:
pass
self.assertEqual(expected, row_select(line))
def test_parse_triplet(self):
name = '4_triplet.log'
path = os.path.join(settings.MEDIA_ROOT, "tests", name)
log = fileparser.Log(path)
with StringIO(log.format_data()) as f:
reader = csv.reader(f, delimiter=',', quotechar='"')
expected = LOG_DATA[name]
for line in reader:
pass
self.assertEqual(expected, row_select(line))
def test_parse_nonbenzo_windows(self):
name = "methane_windows.log"
path = os.path.join(settings.MEDIA_ROOT, "tests", name)
log = fileparser.Log(path)
with StringIO(log.format_data()) as f:
reader = csv.reader(f, delimiter=',', quotechar='"')
expected = LOG_DATA[name]
for line in reader:
pass
actual = [x.lower() for x in row_select(line)]
expected = [x.lower() for x in expected]
self.assertEqual(expected, actual)
def test_parse_nonbenzo_windows_td(self):
name = "methane_td_windows.log"
path = os.path.join(settings.MEDIA_ROOT, "tests", name)
log = fileparser.Log(path)
with StringIO(log.format_data()) as f:
reader = csv.reader(f, delimiter=',', quotechar='"')
expected = LOG_DATA[name]
for line in reader:
pass
actual = [x.lower() for x in row_select(line)]
expected = [x.lower() for x in expected]
self.assertEqual(expected, actual)
def test_parse_multistep_log(self):
name = "A.log"
path = os.path.join(settings.MEDIA_ROOT, "tests", name)
log = fileparser.Log(path)
with StringIO(log.format_data()) as f:
reader = csv.reader(f, delimiter=',', quotechar='"')
expected = [[y.lower() for y in x] for x in LOG_DATA[name]]
actual = [[y.lower() for y in row_select(x)] for x in reader]
self.assertEqual(expected, actual)
def test_parse_log_format_gjf(self):
name = "A.log"
path = os.path.join(settings.MEDIA_ROOT, "tests", name)
log = fileparser.Log(path)
actual = log.format_gjf()
expected = "%nprocshared=1\n%mem=12GB\n%chk=A.chk\n"
expected += "# td B3LYP/6-31g(d,p) geom=check guess=read\n\nA\n\n"
expected += "0 1\nH 0.3784566169 0. 0.\nH 1.1215433831 0. 0.\n\n"
self.assertEqual(expected, actual)
def test_parse_log_format_gjf_td(self):
name = "A_TON_A_A.log"
path = os.path.join(settings.MEDIA_ROOT, "tests", name)
log = fileparser.Log(path)
actual = log.format_gjf(td=True).split("\n")[:4]
expected = [
"%nprocshared=16",
"%mem=59GB",
"%chk=A_TON_A_A_TD.chk",
"# td b3lyp/6-31g(d)",
]
self.assertEqual(expected, actual)
def test_parse_log_transform(self):
name = "transform.log"
path = os.path.join(settings.MEDIA_ROOT, "tests", name)
log = fileparser.Log(path)
self.assertIsNotNone(log.Rot)
self.assertIsNotNone(log.trans)
def test_parse_log_format_out(self):
name = "A.log"
path = os.path.join(settings.MEDIA_ROOT, "tests", name)
log = fileparser.Log(path)
actual = log.format_out()
expected = "H 0.3784566169 0. 0.\nH 1.1215433831 0. 0.\n"
self.assertEqual(expected, actual)
def test_parse_log_format_outx(self):
name = "A.log"
path = os.path.join(settings.MEDIA_ROOT, "tests", name)
log = fileparser.Log(path)
actual = log.format_outx()
# TODO Needs a better test
self.assertEqual(5, len(actual))
def test_parse_odd_force(self):
name = "odd_force.log"
path = os.path.join(settings.MEDIA_ROOT, "tests", name)
log = fileparser.Log(path)
        # This is a two-part test: first it makes sure that parsing does
        # not blow up with an error, and then it checks one of the parsed
        # values.
actual = log.format_outx()
        self.assertEqual(146, len(actual))
        # Check that one of the huge values is in it
self.assertIn("-50394.5620476", actual[1])
def test_Output_newline(self):
out = fileparser.Output()
string = "Some message"
out.write(string, newline=False)
result = out.format_output(errors=False)
self.assertEqual(result, string + '\n')
def test_catch(self):
class TestIt(fileparser.Output):
@fileparser.catch
def get_fail(self):
raise ValueError("some string")
test = TestIt()
test.get_fail()
expected = "\n---- Errors (1) ----\nValueError('some string',)\n"
self.assertEqual(test.format_output(errors=True), expected)
class UtilsTestCase(SimpleTestCase):
def test_replace_geom_vars(self):
geom, variables = METHANE.strip().split("\n\n")
results = utils.replace_geom_vars(geom, variables)
self.assertEqual(METHANE_REPLACED.strip(), results)
def test_convert_zmatrix_to_cart_meth(self):
geom, variables = METHANE.strip().split("\n\n")
string = utils.replace_geom_vars(geom, variables)
results = utils.convert_zmatrix_to_cart(string)
self.assertEqual(METHANE_CART.strip(), results.strip())
def test_convert_zmatrix_to_cart_benz(self):
geom, variables = BENZENE.strip().split("\n\n")
string = utils.replace_geom_vars(geom, variables)
results = utils.convert_zmatrix_to_cart(string)
self.assertEqual(BENZENE_CART.strip(), results.strip())
def test_find_repeating(self):
tests = (
("4", ('4', 1)),
("44", ('4', 2)),
("4444", ('4', 4)),
("4a4a", ('4a', 2)),
("4ab4ab4ab", ('4ab', 3)),
("4ab4ab5", ('4ab4ab5', 1)),
("4ab54ab5", ('4ab5', 2)),
(["11", "12"], (["11", "12"], 1))
)
for value, expected in tests:
result = utils.find_repeating(value)
self.assertEqual(result, expected)
class GraphTestCase(SimpleTestCase):
def test_graph(self):
# doesn't break
self.assertEqual(graph.run_name("TON"), set(["TON"]))
# multi cores
self.assertEqual(graph.run_name("TON_TON"), set(["TON"]))
# opposite cores
self.assertEqual(graph.run_name("TON_CON"), set(["TON", "CON"]))
# left side
self.assertEqual(graph.run_name("4_TON"), set(["TON", '4']))
# middle sides
self.assertEqual(graph.run_name("TON_4_"), set(["TON", '4']))
# right side
self.assertEqual(graph.run_name("TON__4"), set(["TON", '4']))
# left and right sides
self.assertEqual(graph.run_name("5_TON__4"), set(["TON", '4', '5']))
# multi left
self.assertEqual(graph.run_name("TON__45"), set(["TON", '4', '5']))
# multi right
self.assertEqual(graph.run_name("45_TON"), set(["TON", '4', '5']))
# multi middle
self.assertEqual(graph.run_name("TON_45_"), set(["TON", '4', '5']))
# all sides
# Test case broken by start -= hack in 0aa6824
# self.assertEqual(
# graph.run_name("45_TON_67_89"), set(["TON", '4', '5', '6', '7', '8', '9']))
# sides and cores
self.assertEqual(graph.run_name("TON__4_TON"), set(["TON", '4']))
# side types
# Test case broken by start -= hack in 0aa6824
# self.assertEqual(graph.run_name("TON__23456789"), set(
# ["TON", '2', '3', '4', '5', '6', '7', '8', '9']))
# side types
self.assertEqual(
graph.run_name("TON__10111213"), set(["TON", '10', '11', '12', '13']))
# big
# Test case broken by start -= hack in 0aa6824
# self.assertEqual(graph.run_name("TON_7_CCC_94_EON"), set(
# ["TON", '7', "CCC", '9', '4', "E/ZON"]))
class RandomGenTestCase(SimpleTestCase):
def test_random_names(self):
names = [x for x in random_gen.random_names("2", "*", flip=[''], n=1, max_layers=1)]
self.assertEqual(names, ["2**"])
names = [x for x in random_gen.random_names("4", "a", flip=[''], n=1, max_layers=1)]
self.assertEqual(names, ["4aa"])
names = [x for x in random_gen.random_names("4", "a", n=1, max_layers=1)]
self.assertEqual(names, ["4aa"])
def test_random_names_sets(self):
aryl = ['4', '5']
rgroups = ['a', 'b']
flip = ['', '-']
expected = [''.join(x) for x in product(aryl, rgroups, rgroups, aryl, rgroups, rgroups, flip)]
expected += [''.join(x) for x in product(aryl, rgroups, rgroups)]
expected = set(expected)
names = [x for x in random_gen.random_names(aryl, rgroups, flip=flip, n=100, max_layers=2)]
names = set(names)
self.assertTrue(names & expected)
self.assertFalse(names.difference(expected))
def test_all_layers_same(self):
layer = ["4aa"]
layers = set([x for x in random_gen.all_layers_same(layer, max_layers=3)])
expected = set([
'4aa', '4aa4aa', '4aa4aa-', '4aa4aa4aa',
'4aa4aa-4aa', '4aa4aa4aa-', '4aa4aa-4aa-',
])
self.assertEqual(layers, expected)
|
crcollins/chemtools-webapp
|
chemtools/tests.py
|
Python
|
mit
| 52,884
|
[
"ADF"
] |
e8edfc8732bb7c8fe1a5dedb34249f43b0518d09f5c0e8a356aa05a09a5ebc31
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=invalid-name
#! /usr/bin/python
# Convert an acor file into VTK format (specifically a vtp file)
from xml.dom import minidom
def convertToVTU(infile, outpath):
#first need to find some things from the file
datafile = open(infile,'r')
datalist=[]
planelist=[]
npoints = 0
for line in datafile:
numbers = line.split()
        if len(numbers) != 4:
            continue
        if npoints == 0:
            curz = numbers[2]
        if numbers[2] != curz:
datalist.append(planelist)
curz = numbers[2]
planelist=[]
planelist.append(numbers)
npoints += 1
# Append last set
datalist.append(planelist)
datafile.close()
ncells = len(datalist)
doc = minidom.Document()
vtkfile = doc.createElement("VTKFile")
doc.appendChild(vtkfile)
vtkfile.setAttribute("type","UnstructuredGrid")
vtkfile.setAttribute("version","0.1")
vtkfile.setAttribute("byte_order", "LittleEndian")
ugrid = doc.createElement("UnstructuredGrid")
vtkfile.appendChild(ugrid)
piece = doc.createElement("Piece")
ugrid.appendChild(piece)
    piece.setAttribute("NumberOfPoints", str(npoints))
    piece.setAttribute("NumberOfCells", str(ncells))
# First the PointData element
point_data = doc.createElement("PointData")
piece.appendChild(point_data)
point_data.setAttribute("Scalars", "Intensity")
data_array = doc.createElement("DataArray")
point_data.appendChild(data_array)
data_array.setAttribute("type", "Float32")
data_array.setAttribute("Name", "Intensity")
data_array.setAttribute("format","ascii")
for plane in datalist:
for point in plane:
txt = doc.createTextNode(str(point[3]))
data_array.appendChild(txt)
# Now the Points element
points = doc.createElement("Points")
piece.appendChild(points)
data_array = doc.createElement("DataArray")
points.appendChild(data_array)
data_array.setAttribute("type", "Float32")
data_array.setAttribute("NumberOfComponents", "3")
data_array.setAttribute("format","ascii")
for plane in datalist:
for point in plane:
txt = doc.createTextNode(str(point[0]) + " " + str(point[1]) + " " +str(point[2]))
data_array.appendChild(txt)
cells = doc.createElement("Cells")
piece.appendChild(cells)
data_array = doc.createElement("DataArray")
cells.appendChild(data_array)
data_array.setAttribute("type", "Int32")
data_array.setAttribute("Name", "connectivity")
data_array.setAttribute("format","ascii")
i = 0
for plane in datalist:
for point in plane:
txt = doc.createTextNode(str(i))
data_array.appendChild(txt)
i += 1
data_array = doc.createElement("DataArray")
cells.appendChild(data_array)
data_array.setAttribute("type", "Int32")
data_array.setAttribute("Name", "offsets")
data_array.setAttribute("format","ascii")
i = 0
for plane in datalist:
i += len(plane)
txt = doc.createTextNode(str(i))
data_array.appendChild(txt)
data_array = doc.createElement("DataArray")
cells.appendChild(data_array)
data_array.setAttribute("type", "Int32")
data_array.setAttribute("Name", "types")
data_array.setAttribute("format","ascii")
for plane in datalist:
txt = doc.createTextNode("4")
data_array.appendChild(txt)
#print doc.toprettyxml(newl="\n")
    shortname = infile.split('/')
    name = outpath + shortname[-1] + ".vtu"
handle = open(name,'w')
doc.writexml(handle, newl="\n")
handle.close()
del datalist
del planelist
del doc
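# A minimal usage sketch for convertToVTU (hypothetical paths; the input
# must be an acor file whose data rows hold "x y z intensity" columns
# grouped into constant-z planes, and the output directory must exist):
#     convertToVTU('/data/run1.acor', '/data/vtu/')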
def writeParallelVTU(files, prefix):
doc = minidom.Document()
vtkfile = doc.createElement("VTKFile")
doc.appendChild(vtkfile)
vtkfile.setAttribute("type","PUnstructuredGrid")
vtkfile.setAttribute("version","0.1")
vtkfile.setAttribute("byte_order", "LittleEndian")
pugrid = doc.createElement("PUnstructuredGrid")
vtkfile.appendChild(pugrid)
pugrid.setAttribute("GhostLevel", "0")
ppointdata = doc.createElement("PPointData")
pugrid.appendChild(ppointdata)
ppointdata.setAttribute("Scalars","Intensity")
data_array = doc.createElement("PDataArray")
ppointdata.appendChild(data_array)
data_array.setAttribute("type","Float32")
data_array.setAttribute("Name","Intensity")
ppoints = doc.createElement("PPoints")
pugrid.appendChild(ppoints)
data_array = doc.createElement("PDataArray")
ppoints.appendChild(data_array)
data_array.setAttribute("type","Float32")
data_array.setAttribute("NumberOfComponents","3")
for name in files:
piece = doc.createElement("Piece")
pugrid.appendChild(piece)
piece.setAttribute("Source",name + ".vtu")
# print doc.toprettyxml(newl="\n")
filename = prefix + files[0].split('.')[0] + ".pvtu"
# print filename
handle = open(filename,'w')
doc.writexml(handle, newl="\n")
handle.close()
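# A companion sketch for writeParallelVTU (hypothetical names; each entry
# must match a .vtu piece written by convertToVTU into `prefix`, since a
# .vtu suffix is appended per piece and the first name seeds the .pvtu
# file name):
#     writeParallelVTU(['run1.acor', 'run2.acor'], '/data/vtu/')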
|
mganeva/mantid
|
tools/VTKConverter/VTKConvert.py
|
Python
|
gpl-3.0
| 5,381
|
[
"VTK"
] |
e061414d0a35af040ee4f18ff50ec3a207b8fa20511abd8cc70eae1f9f0974ea
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Initialize and configure Flask-Script extension.
Configuration
^^^^^^^^^^^^^
The following configuration variables are provided:
===================== =======================================================
`bind address` Preferred binding address of the server. Can be used to
select a specific interface or to bind to all via
`0.0.0.0`.
`bind port` Preferred binding port of the server. Can differ from
the one stated in `CFG_SITE_URL` so it can be accessed
via reverse proxy.
===================== =======================================================
They are assigned by the following parameters, in decreasing priority:
1. Command line arguments of `inveniomanage runserver`
2. `SERVER_BIND_ADDRESS` and `SERVER_BIND_PORT` configuration
3. Values guessed from `CFG_SITE_URL`
4. Defaults (`127.0.0.1:80`)
"""
from __future__ import print_function
import functools
import re
import ssl
from types import FunctionType
from flask import current_app, flash
from flask_registry import ModuleAutoDiscoveryRegistry, RegistryProxy
from flask_script import Manager as FlaskExtManager
from flask_script.commands import Clean, Server, Shell, ShowUrls
from invenio.base.signals import post_command, pre_command
from six.moves import urllib
from werkzeug.utils import find_modules, import_string
def change_command_name(method=None, new_name=None):
"""Change command name to `new_name` or replace '_' by '-'."""
if method is None:
return functools.partial(change_command_name, new_name=new_name)
if new_name is None:
new_name = method.__name__.replace('_', '-')
method.__name__ = new_name
return method
def generate_secret_key():
"""Generate secret key."""
import string
import random
rng = random.SystemRandom()
return ''.join(
rng.choice(string.ascii_letters + string.digits)
for dummy in range(0, 256)
)
def print_progress(p, L=40, prefix='', suffix=''):
"""Print textual progress bar."""
bricks = int(p * L)
print('\r{prefix} [{bricks}{spaces}] {progress}% {suffix}'.format(
prefix=prefix, suffix=suffix,
bricks='#' * bricks, spaces=' ' * (L - bricks),
progress=int(p * 100),
), end=' ')
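# Example: print_progress(0.42, L=40, prefix='Upgrading', suffix='modules')
# renders a 40-column bar at 42%; any progress value in [0, 1] works.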
def check_for_software_updates(flash_message=False):
"""Check for a new release of Invenio.
:return: True if you have latest version, else False if you need to upgrade
or None if server was not reachable.
"""
from invenio.config import CFG_VERSION
from invenio.base.i18n import _
try:
        find = re.compile(r'Invenio v[0-9]+\.[0-9]+\.[0-9]+(-rc[0-9])?'
                          r' is released')
release_notes = 'https://raw.githubusercontent.com/' \
'inveniosoftware/invenio/master/RELEASE-NOTES'
webFile = urllib.request.urlopen(release_notes)
temp = ""
version = ""
version1 = ""
        while True:
            # readline() returns bytes on Python 3; decode it so the
            # regular expressions above can match the text
            temp = webFile.readline().decode('utf-8', 'ignore')
match1 = find.match(temp)
try:
version = match1.group()
break
except Exception:
pass
if not temp:
break
webFile.close()
        submatch = re.compile(r'[0-9]+\.[0-9]+\.[0-9]+(-rc[0-9])?')
version1 = submatch.search(version)
web_version = version1.group().split(".")
local_version = CFG_VERSION.split(".")
        if (web_version[0] > local_version[0] or
                (web_version[0] == local_version[0] and
                 web_version[1] > local_version[1]) or
                (web_version[0] == local_version[0] and
                 web_version[1] == local_version[1] and
                 web_version[2] > local_version[2])):
            if flash_message:
                flash(_('A newer version of Invenio is available for '
                        'download. You may want to visit '
                        '<a href="%(wiki)s">%(wiki)s</a>',
                        wiki='http://invenio-software.org/wiki/'
                             'Installation/Download'), 'warning')
return False
except Exception as e:
print(e)
if flash_message:
flash(_('Cannot download or parse release notes '
'from %(release_notes)s', release_notes=release_notes),
'error')
return None
return True
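# Example (requires network access):
#     up_to_date = check_for_software_updates(flash_message=False)
# True means up to date, False means an upgrade exists, and None means the
# release notes could not be fetched or parsed.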
class Manager(FlaskExtManager):
"""Custom manager implementation with signaling support."""
def add_command(self, name, command):
"""Wrap default ``add_command`` method."""
sender = command.run if type(command.run) is FunctionType \
else command.__class__
class SignalingCommand(command.__class__):
def __call__(self, *args, **kwargs):
app = self.app if not len(args) else args[0]
with app.test_request_context():
pre_command.send(sender, args=args, **kwargs)
res = super(SignalingCommand, self).__call__(*args, **kwargs)
with app.test_request_context():
post_command.send(sender, args=args, **kwargs)
return res
command.__class__ = SignalingCommand
return super(Manager, self).add_command(name, command)
def set_serve_static_files(sender, *args, **kwargs):
"""Enable serving of static files for `runserver` command.
Normally Apache serves static files, but during development and if you are
using the Werkzeug standalone development server, you can set this flag to
`True`, to enable static file serving.
"""
current_app.config.setdefault('CFG_FLASK_SERVE_STATIC_FILES', True)
pre_command.connect(set_serve_static_files, sender=Server)
def create_ssl_context(config):
"""Create :class:`ssl.SSLContext` from application config.
:param config: Dict-like application configuration.
:returns: A valid context or in case TLS is not enabled `None`.
The following configuration variables are processed:
============================ ==============================================
`SERVER_TLS_ENABLE` If `True`, a SSL context will be created. In
this case, the required configuration
variables must be provided.
`SERVER_TLS_KEY` (required) Filepath (string) of private key provided as
PEM file.
`SERVER_TLS_CERT` (required) Filepath (string) of your certificate plus
all intermediate certificate, concatenated in
that order and stored as PEM file.
`SERVER_TLS_KEYPASS` If private key is encrypted, a password can be
provided.
`SERVER_TLS_PROTOCOL` String that selects a protocol from
`ssl.PROTOCOL_*`. Defaults to `SSLv23`. See
:mod:`ssl` for details.
`SERVER_TLS_CIPHERS` String that selects possible ciphers according
to the `OpenSSL cipher list format
<https://www.openssl.org/docs/apps/
ciphers.html>`_
`SERVER_TLS_DHPARAMS` Filepath (string) to parameters for
Diffie-Helman key exchange. If not set the
built-in parameters are used.
`SERVER_TLS_ECDHCURVE` Curve (string) that should be used for
Elliptic Curve-based Diffie-Helman key
exchange. If not set, the defaults provided by
OpenSSL are used.
============================ ==============================================
.. note:: In case `None` is returned because of a non-enabling
configuration, TLS will be disabled. It is **not** possible to have a
TLS and non-TLS configuration at the same time. So if TLS is activated,
no non-TLS connection are accepted.
.. important:: Keep in mind to change `CFG_SITE_URL` and
`CFG_SITE_SECURE_URL` according to your TLS configuration. This does
not only include the protocol (`http` vs `https`) but also the hostname
that has to match the common name in your certificate. If a wildcard
certificate is provided, the hostname stated in
`CFG_SITE[_SECURE]_URL` must match the wildcard pattern.
"""
ssl_context = None
if config.get('SERVER_TLS_ENABLE', False):
if 'SERVER_TLS_KEY' not in config \
or 'SERVER_TLS_CERT' not in config:
raise AttributeError(
'`SERVER_TLS_KEY` and `SERVER_TLS_CERT` required!'
)
# CLIENT_AUTH creates a server context, so do not get confused here
ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
if 'SERVER_TLS_PROTOCOL' in config:
ssl_context.protocol = getattr(
ssl,
'PROTOCOL_{}'.format(config.get('SERVER_TLS_PROTOCOL'))
)
ssl_context.load_cert_chain(
certfile=config.get('SERVER_TLS_CERT'),
keyfile=config.get('SERVER_TLS_KEY'),
password=config.get('SERVER_TLS_KEYPASS', None)
)
if 'SERVER_TLS_CIPHERS' in config:
ssl_context.set_ciphers(
config.get('SERVER_TLS_CIPHERS')
)
if 'SERVER_TLS_DHPARAMS' in config:
ssl_context.load_dh_params(
config.get('SERVER_TLS_DHPARAMS')
)
if 'SERVER_TLS_ECDHCURVE' in config:
ssl_context.set_ecdh_curve(
config.get('SERVER_TLS_ECDHCURVE')
)
# that one seems to be required for werkzeug
ssl_context.check_hostname = False
return ssl_context
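# A minimal sketch of a TLS-enabling configuration for create_ssl_context
# (the PEM paths are hypothetical and must point at existing files):
#     config = {
#         'SERVER_TLS_ENABLE': True,
#         'SERVER_TLS_KEY': '/etc/ssl/private/invenio.key',
#         'SERVER_TLS_CERT': '/etc/ssl/certs/invenio-chain.pem',
#     }
#     ssl_context = create_ssl_context(config)  # ssl.SSLContext or None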
def register_manager(manager):
"""Register all manager plugins and default commands with the manager."""
from six.moves.urllib.parse import urlparse
managers = RegistryProxy('managers', ModuleAutoDiscoveryRegistry, 'manage')
with manager.app.app_context():
for script in find_modules('invenio.base.scripts'):
manager.add_command(script.split('.')[-1],
import_string(script + ':manager'))
for script in managers:
if script.__name__ == 'invenio.base.manage':
continue
manager.add_command(script.__name__.split('.')[-2],
getattr(script, 'manager'))
manager.add_command("clean", Clean())
manager.add_command("show-urls", ShowUrls())
manager.add_command("shell", Shell())
parsed_url = urlparse(manager.app.config.get('CFG_SITE_URL'))
host = manager.app.config.get(
'SERVER_BIND_ADDRESS',
parsed_url.hostname or '127.0.0.1'
)
port = manager.app.config.get(
'SERVER_BIND_PORT',
parsed_url.port or 80
)
ssl_context = create_ssl_context(manager.app.config)
runserver = Server(host=host, port=port, ssl_context=ssl_context)
manager.add_command("runserver", runserver)
# FIXME separation of concerns is violated here.
from invenio.ext.collect import collect
collect.init_script(manager)
from invenio.ext.assets import command, bower
manager.add_command("assets", command)
manager.add_command("bower", bower)
|
switowski/invenio
|
invenio/ext/script/__init__.py
|
Python
|
gpl-2.0
| 12,359
|
[
"VisIt"
] |
1e0a79b3b00d3e956e8d40aa8c07644fa5bbbf1a63963367927e5cde26642b3f
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import os
import shutil
import subprocess
import numpy as np
from vasp.incar import Incar
from vasp.chgcar import Chgcar
from vasp.kpoints import Kpoints
from ph_analysis.conf_creation import ConfCreation
__author__ = "Yuji Ikeda"
class FiniteDisplacer(object):
def __init__(self, directory_data, dim, distance, thrown_file=None):
self._directory_data = directory_data
self._dim = dim
self._distance = distance
self._thrown_file = thrown_file
self._ispin = None
self._magmom = None
def run(self):
dim = self._dim
distance = self._distance
root = os.getcwd()
dirname = '_'.join([str(x) for x in dim]) + '/' + str(distance)
if not os.path.isdir(dirname):
os.makedirs(dirname)
os.chdir(dirname)
self.copy_files()
self.extract_ispin()
if self._ispin == '2':
self.analyze_chgcar()
self.modify_files()
self.create_poscars()
self.create_directories()
os.chdir(root)
def copy_files(self):
directory_data = self._directory_data
thrown_file = self._thrown_file
shutil.copy2(directory_data + '/POSCAR', 'POSCAR_initial')
shutil.copy2(directory_data + '/CONTCAR', 'POSCAR')
shutil.copy2(directory_data + '/KPOINTS', 'KPOINTS')
shutil.copy2(directory_data + '/POTCAR', 'POTCAR')
shutil.copy2(directory_data + '/INCAR', 'INCAR')
if self._dim == [1, 1, 1]:
shutil.copy2(directory_data + '/CHGCAR', 'CHGCAR')
if thrown_file is not None:
shutil.copy2(directory_data + '/' + thrown_file, thrown_file)
def analyze_chgcar(self):
"""Analyze CHGCAR
magmom is used in:
1. INCAR: To give initial guess for dim != [1, 1, 1]
2. disp.conf and write_fc.conf: To find symmetry operations
with the consideration of magnetic moments
"""
chgcar = Chgcar('CHGCAR')
chgcar.generate_atomic_charge()
chgcar.write_atomic_charge()
scaling = 1.5
magmom = chgcar.get_atomic_charge()[1] * scaling
self._magmom = magmom
def extract_ispin(self):
incar = Incar('INCAR')
ispin = incar.get_dictionary().get('ISPIN', '1')
self._ispin = ispin
def modify_files(self):
dim = self._dim
magmom = self._magmom
# INCAR
incar = Incar('INCAR')
if dim == [1, 1, 1]:
icharg = '1'
else:
icharg = None
incar.generate_supercell(dim) # MAGMOM
if magmom is not None:
            n = np.prod(dim)
magmom_supercell = [x for x in magmom for _ in range(n)]
magmom_str = ' '.join(
['{:.4f}'.format(x) for x in magmom_supercell]
)
else:
magmom_str = None
incar_overwritten = {
'EDIFF': '1.0E-8',
'NELM': '500',
'NSW': '1',
'ISIF': '2',
'LWAVE': '.FALSE.',
'LCHARG': '.FALSE.',
'ICHARG': icharg,
'MAGMOM': magmom_str,
}
incar.update_dictionary(incar_overwritten)
incar.write('INCAR')
# KPOINTS
kpoints = Kpoints('KPOINTS')
kpoints.generate_supercell(dim)
kpoints.write('KPOINTS')
def create_poscars(self):
dim = self._dim
distance = self._distance
magmom = self._magmom
conf_creation = ConfCreation(
dim=dim,
distance=distance,
magmom=magmom
)
conf_creation.run()
subprocess.call(
'phonopy -v disp.conf > phonopy_disp.log',
shell=True,
)
def create_directories(self):
thrown_file = self._thrown_file
root = os.getcwd()
dir_list = sorted(os.listdir('.'))
poscar_list = [p for p in dir_list if 'POSCAR-' in p]
for poscar in poscar_list:
disp_dir = poscar.replace('POSCAR-', 'disp')
if os.path.exists(disp_dir):
shutil.rmtree(disp_dir)
os.mkdir(disp_dir)
os.chdir(disp_dir)
shutil.copy2('../' + poscar, 'POSCAR')
            shutil.copy2('../INCAR', '.')
            shutil.copy2('../POTCAR', '.')
            shutil.copy2('../KPOINTS', '.')
if thrown_file is not None:
shutil.copy2('../' + thrown_file, '.')
if os.path.exists('../CHGCAR'):
os.symlink('../CHGCAR', 'CHGCAR')
os.chdir(root)
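# A minimal driver sketch (hypothetical arguments; `relax` must hold a
# converged VASP run with POSCAR, CONTCAR, KPOINTS, POTCAR and INCAR, and
# phonopy must be on PATH for create_poscars to succeed):
#     displacer = FiniteDisplacer('relax', dim=[2, 2, 2], distance=0.01)
#     displacer.run()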
|
yuzie007/ph_analysis
|
ph_analysis/finite_displacer.py
|
Python
|
mit
| 4,797
|
[
"VASP",
"phonopy"
] |
d3d059114d2dd271ad7aee9071512ac7745dbb2a91138b37e255fcef7d58544b
|
# -*- coding: utf-8 -*-
"""Functions to make 3D plots with M/EEG data."""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
# Mark Wronkiewicz <wronk.mark@gmail.com>
#
# License: Simplified BSD
import base64
from distutils.version import LooseVersion
from itertools import cycle
import os.path as op
import warnings
from functools import partial
import numpy as np
from scipy import linalg
from ..defaults import DEFAULTS
from ..externals.six import BytesIO, string_types, advance_iterator
from ..io import _loc_to_coil_trans, Info
from ..io.pick import pick_types
from ..io.constants import FIFF
from ..io.meas_info import read_fiducials
from ..source_space import SourceSpaces, _create_surf_spacing, _check_spacing
from ..surface import (_get_head_surface, get_meg_helmet_surf, read_surface,
transform_surface_to, _project_onto_surface,
complete_surface_info, mesh_edges)
from ..transforms import (read_trans, _find_trans, apply_trans,
combine_transforms, _get_trans, _ensure_trans,
invert_transform, Transform)
from ..utils import (get_subjects_dir, logger, _check_subject, verbose, warn,
_import_mlab, SilenceStdout, has_nibabel, check_version,
_ensure_int)
from .utils import (mne_analyze_colormap, _prepare_trellis, COLORS, plt_show,
tight_layout, figure_nobar)
FIDUCIAL_ORDER = (FIFF.FIFFV_POINT_LPA, FIFF.FIFFV_POINT_NASION,
FIFF.FIFFV_POINT_RPA)
def _fiducial_coords(points, coord_frame=None):
"""Generate 3x3 array of fiducial coordinates."""
if coord_frame is not None:
points = (p for p in points if p['coord_frame'] == coord_frame)
points_ = dict((p['ident'], p) for p in points if
p['kind'] == FIFF.FIFFV_POINT_CARDINAL)
if points_:
return np.array([points_[i]['r'] for i in FIDUCIAL_ORDER])
else:
return np.array([])
def plot_head_positions(pos, mode='traces', cmap='viridis', direction='z',
show=True):
"""Plot head positions.
Parameters
----------
pos : ndarray, shape (n_pos, 10)
The head position data.
mode : str
Can be 'traces' (default) to show position and quaternion traces,
or 'field' to show the position as a vector field over time.
The 'field' mode requires matplotlib 1.4+.
cmap : matplotlib Colormap
Colormap to use for the trace plot, default is "viridis".
direction : str
Can be any combination of "x", "y", or "z" (default: "z") to show
directional axes in "field" mode.
show : bool
Show figure if True. Defaults to True.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
"""
from ..chpi import head_pos_to_trans_rot_t
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from mpl_toolkits.mplot3d.art3d import Line3DCollection
from mpl_toolkits.mplot3d import axes3d # noqa: F401, analysis:ignore
if not isinstance(mode, string_types) or mode not in ('traces', 'field'):
raise ValueError('mode must be "traces" or "field", got %s' % (mode,))
trans, rot, t = head_pos_to_trans_rot_t(pos) # also ensures pos is okay
# trans, rot, and t are for dev_head_t, but what we really want
# is head_dev_t (i.e., where the head origin is in device coords)
use_trans = np.einsum('ijk,ik->ij', rot[:, :3, :3].transpose([0, 2, 1]),
-trans) * 1000
use_rot = rot.transpose([0, 2, 1])
use_quats = -pos[:, 1:4] # inverse (like doing rot.T)
if cmap == 'viridis' and not check_version('matplotlib', '1.5'):
        warn('viridis is unavailable on matplotlib < 1.5, using "YlGnBu_r"')
cmap = 'YlGnBu_r'
if mode == 'traces':
fig, axes = plt.subplots(3, 2, sharex=True)
labels = ['xyz', ('$q_1$', '$q_2$', '$q_3$')]
for ii, (quat, coord) in enumerate(zip(use_quats.T, use_trans.T)):
axes[ii, 0].plot(t, coord, 'k')
axes[ii, 0].set(ylabel=labels[0][ii], xlim=t[[0, -1]])
axes[ii, 1].plot(t, quat, 'k')
axes[ii, 1].set(ylabel=labels[1][ii], xlim=t[[0, -1]])
for ii, title in enumerate(('Position (mm)', 'Rotation (quat)')):
axes[0, ii].set(title=title)
axes[-1, ii].set(xlabel='Time (s)')
else: # mode == 'field':
if not check_version('matplotlib', '1.4'):
raise RuntimeError('The "field" mode requires matplotlib version '
'1.4+')
fig, ax = plt.subplots(1, subplot_kw=dict(projection='3d'))
# First plot the trajectory as a colormap:
# http://matplotlib.org/examples/pylab_examples/multicolored_line.html
pts = use_trans[:, np.newaxis]
segments = np.concatenate([pts[:-1], pts[1:]], axis=1)
norm = Normalize(t[0], t[-2])
lc = Line3DCollection(segments, cmap=cmap, norm=norm)
lc.set_array(t[:-1])
ax.add_collection(lc)
# now plot the head directions as a quiver
dir_idx = dict(x=0, y=1, z=2)
for d, length in zip(direction, [1., 0.5, 0.25]):
use_dir = use_rot[:, :, dir_idx[d]]
# draws stems, then heads
array = np.concatenate((t, np.repeat(t, 2)))
ax.quiver(use_trans[:, 0], use_trans[:, 1], use_trans[:, 2],
use_dir[:, 0], use_dir[:, 1], use_dir[:, 2], norm=norm,
cmap=cmap, array=array, pivot='tail', length=length)
mins = use_trans.min(0)
maxs = use_trans.max(0)
scale = (maxs - mins).max() / 2.
xlim, ylim, zlim = (maxs + mins)[:, np.newaxis] / 2. + [-scale, scale]
ax.set(xlabel='x', ylabel='y', zlabel='z',
xlim=xlim, ylim=ylim, zlim=zlim, aspect='equal')
ax.view_init(30, 45)
tight_layout(fig=fig)
plt_show(show)
return fig
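# A usage sketch for plot_head_positions (assumes `pos` is an (n_pos, 10)
# head-position array, e.g. as loaded by the mne.chpi helpers):
#     fig = plot_head_positions(pos, mode='traces')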
def plot_evoked_field(evoked, surf_maps, time=None, time_label='t = %0.0f ms',
n_jobs=1):
"""Plot MEG/EEG fields on head surface and helmet in 3D.
Parameters
----------
evoked : instance of mne.Evoked
The evoked object.
surf_maps : list
The surface mapping information obtained with make_field_map.
time : float | None
The time point at which the field map shall be displayed. If None,
the average peak latency (across sensor types) is used.
time_label : str
How to print info about the time instant visualized.
n_jobs : int
Number of jobs to run in parallel.
Returns
-------
fig : instance of mlab.Figure
The mayavi figure.
"""
types = [t for t in ['eeg', 'grad', 'mag'] if t in evoked]
time_idx = None
if time is None:
time = np.mean([evoked.get_peak(ch_type=t)[1] for t in types])
if not evoked.times[0] <= time <= evoked.times[-1]:
raise ValueError('`time` (%0.3f) must be inside `evoked.times`' % time)
time_idx = np.argmin(np.abs(evoked.times - time))
types = [sm['kind'] for sm in surf_maps]
# Plot them
mlab = _import_mlab()
alphas = [1.0, 0.5]
colors = [(0.6, 0.6, 0.6), (1.0, 1.0, 1.0)]
colormap = mne_analyze_colormap(format='mayavi')
colormap_lines = np.concatenate([np.tile([0., 0., 255., 255.], (127, 1)),
np.tile([0., 0., 0., 255.], (2, 1)),
np.tile([255., 0., 0., 255.], (127, 1))])
fig = mlab.figure(bgcolor=(0.0, 0.0, 0.0), size=(600, 600))
_toggle_mlab_render(fig, False)
for ii, this_map in enumerate(surf_maps):
surf = this_map['surf']
map_data = this_map['data']
map_type = this_map['kind']
map_ch_names = this_map['ch_names']
if map_type == 'eeg':
pick = pick_types(evoked.info, meg=False, eeg=True)
else:
pick = pick_types(evoked.info, meg=True, eeg=False, ref_meg=False)
ch_names = [evoked.ch_names[k] for k in pick]
set_ch_names = set(ch_names)
set_map_ch_names = set(map_ch_names)
if set_ch_names != set_map_ch_names:
message = ['Channels in map and data do not match.']
diff = set_map_ch_names - set_ch_names
if len(diff):
message += ['%s not in data file. ' % list(diff)]
diff = set_ch_names - set_map_ch_names
if len(diff):
message += ['%s not in map file.' % list(diff)]
raise RuntimeError(' '.join(message))
data = np.dot(map_data, evoked.data[pick, time_idx])
# Make a solid surface
vlim = np.max(np.abs(data))
alpha = alphas[ii]
mesh = _create_mesh_surf(surf, fig)
with warnings.catch_warnings(record=True): # traits
surface = mlab.pipeline.surface(mesh, color=colors[ii],
opacity=alpha, figure=fig)
surface.actor.property.backface_culling = True
# Now show our field pattern
mesh = _create_mesh_surf(surf, fig, scalars=data)
with warnings.catch_warnings(record=True): # traits
fsurf = mlab.pipeline.surface(mesh, vmin=-vlim, vmax=vlim,
figure=fig)
fsurf.module_manager.scalar_lut_manager.lut.table = colormap
fsurf.actor.property.backface_culling = True
# And the field lines on top
mesh = _create_mesh_surf(surf, fig, scalars=data)
with warnings.catch_warnings(record=True): # traits
cont = mlab.pipeline.contour_surface(
mesh, contours=21, line_width=1.0, vmin=-vlim, vmax=vlim,
opacity=alpha, figure=fig)
cont.module_manager.scalar_lut_manager.lut.table = colormap_lines
if '%' in time_label:
time_label %= (1e3 * evoked.times[time_idx])
with warnings.catch_warnings(record=True): # traits
mlab.text(0.01, 0.01, time_label, width=0.4, figure=fig)
with SilenceStdout(): # setting roll
mlab.view(10, 60, figure=fig)
_toggle_mlab_render(fig, True)
return fig
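# A usage sketch for plot_evoked_field (hypothetical variables; the maps
# come from mne.make_field_map computed with the same measurement info):
#     maps = mne.make_field_map(evoked, trans=trans_fname, subject='sample')
#     fig = plot_evoked_field(evoked, maps, time=0.1)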
def _create_mesh_surf(surf, fig=None, scalars=None):
"""Create Mayavi mesh from MNE surf."""
mlab = _import_mlab()
nn = surf['nn'].copy()
# make absolutely sure these are normalized for Mayavi
norm = np.sum(nn * nn, axis=1)
mask = norm > 0
nn[mask] /= norm[mask][:, np.newaxis]
x, y, z = surf['rr'].T
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(
x, y, z, surf['tris'], scalars=scalars, figure=fig)
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
mesh.update()
return mesh
def _plot_mri_contours(mri_fname, surf_fnames, orientation='coronal',
                       slices=None, show=True, img_output=None):
"""Plot BEM contours on anatomical slices.
Parameters
----------
mri_fname : str
The name of the file containing anatomical data.
surf_fnames : list of str
The filenames for the BEM surfaces in the format
['inner_skull.surf', 'outer_skull.surf', 'outer_skin.surf'].
orientation : str
        'coronal' or 'axial' or 'sagittal'
slices : list of int
Slice indices.
show : bool
Call pyplot.show() at the end.
img_output : None | tuple
If tuple (width and height), images will be produced instead of a
single figure with many axes. This mode is designed to reduce the
(substantial) overhead associated with making tens to hundreds
of matplotlib axes, instead opting to re-use a single Axes instance.
Returns
-------
fig : Instance of matplotlib.figure.Figure | list
The figure. Will instead be a list of png images if
img_output is a tuple.
"""
import matplotlib.pyplot as plt
import nibabel as nib
if orientation not in ['coronal', 'axial', 'sagittal']:
raise ValueError("Orientation must be 'coronal', 'axial' or "
"'sagittal'. Got %s." % orientation)
# Load the T1 data
nim = nib.load(mri_fname)
data = nim.get_data()
try:
affine = nim.affine
except AttributeError: # old nibabel
affine = nim.get_affine()
n_sag, n_axi, n_cor = data.shape
orientation_name2axis = dict(sagittal=0, axial=1, coronal=2)
orientation_axis = orientation_name2axis[orientation]
if slices is None:
n_slices = data.shape[orientation_axis]
        slices = np.linspace(0, n_slices, 12, endpoint=False).astype(int)
# create of list of surfaces
surfs = list()
trans = linalg.inv(affine)
# XXX : next line is a hack don't ask why
trans[:3, -1] = [n_sag // 2, n_axi // 2, n_cor // 2]
for surf_fname in surf_fnames:
surf = read_surface(surf_fname, return_dict=True)[-1]
# move back surface to MRI coordinate system
surf['rr'] = nib.affines.apply_affine(trans, surf['rr'])
surfs.append(surf)
if img_output is None:
fig, axs = _prepare_trellis(len(slices), 4)
else:
fig, ax = plt.subplots(1, 1, figsize=(7.0, 7.0))
axs = [ax] * len(slices)
fig_size = fig.get_size_inches()
w, h = img_output[0], img_output[1]
w2 = fig_size[0]
fig.set_size_inches([(w2 / float(w)) * w, (w2 / float(w)) * h])
plt.close(fig)
inds = dict(coronal=[0, 1, 2], axial=[2, 0, 1],
sagittal=[2, 1, 0])[orientation]
outs = []
for ax, sl in zip(axs, slices):
# adjust the orientations for good view
if orientation == 'coronal':
dat = data[:, :, sl].transpose()
elif orientation == 'axial':
dat = data[:, sl, :]
elif orientation == 'sagittal':
dat = data[sl, :, :]
# First plot the anatomical data
if img_output is not None:
ax.clear()
ax.imshow(dat, cmap=plt.cm.gray)
ax.axis('off')
# and then plot the contours on top
for surf in surfs:
ax.tricontour(surf['rr'][:, inds[0]], surf['rr'][:, inds[1]],
surf['tris'], surf['rr'][:, inds[2]],
levels=[sl], colors='yellow', linewidths=2.0)
if img_output is not None:
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim(0, img_output[1])
ax.set_ylim(img_output[0], 0)
output = BytesIO()
fig.savefig(output, bbox_inches='tight',
pad_inches=0, format='png')
outs.append(base64.b64encode(output.getvalue()).decode('ascii'))
if show:
plt.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
hspace=0.)
plt_show(show)
return fig if img_output is None else outs
@verbose
def plot_trans(info, trans='auto', subject=None, subjects_dir=None,
source=('bem', 'head', 'outer_skin'),
coord_frame='head', meg_sensors=('helmet', 'sensors'),
eeg_sensors='original', dig=False, ref_meg=False,
ecog_sensors=True, head=None, brain=None, skull=False,
src=None, mri_fiducials=False, verbose=None):
"""Plot head, sensor, and source space alignment in 3D.
Parameters
----------
info : dict
The measurement info.
trans : str | 'auto' | dict | None
The full path to the head<->MRI transform ``*-trans.fif`` file
produced during coregistration. If trans is None, an identity matrix
is assumed.
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT. Can be omitted if ``src`` is provided.
subjects_dir : str
The path to the freesurfer subjects reconstructions.
It corresponds to Freesurfer environment variable SUBJECTS_DIR.
source : str | list
Type to load. Common choices would be `'bem'`, `'head'` or
`'outer_skin'`. If list, the sources are looked up in the given order
and first found surface is used. We first try loading
`'$SUBJECTS_DIR/$SUBJECT/bem/$SUBJECT-$SOURCE.fif'`, and then look for
`'$SUBJECT*$SOURCE.fif'` in the same directory. For `'outer_skin'`,
the subjects bem and bem/flash folders are searched. Defaults to 'bem'.
Note. For single layer bems it is recommended to use 'head'.
coord_frame : str
Coordinate frame to use, 'head', 'meg', or 'mri'.
meg_sensors : bool | str | list
Can be "helmet" (equivalent to False) or "sensors" to show the MEG
helmet or sensors, respectively, or a combination of the two like
``['helmet', 'sensors']`` (equivalent to True, default) or ``[]``.
eeg_sensors : bool | str | list
Can be "original" (default; equivalent to True) or "projected" to
show EEG sensors in their digitized locations or projected onto the
scalp, or a list of these options including ``[]`` (equivalent of
False).
dig : bool | 'fiducials'
If True, plot the digitization points; 'fiducials' to plot fiducial
points only.
ref_meg : bool
If True (default False), include reference MEG sensors.
ecog_sensors : bool
If True (default), show ECoG sensors.
head : bool | None
If True, show head surface. Can also be None, which will show the
head surface for MEG and EEG, but hide it if ECoG sensors are
present.
brain : bool | str | None
If True, show the brain surfaces. Can also be a str for
surface type (e.g., 'pial', same as True), or None (True for ECoG,
False otherwise).
skull : bool | str | list of str | list of dict
Whether to plot skull surface. If string, common choices would be
'inner_skull', or 'outer_skull'. Can also be a list to plot
multiple skull surfaces. If a list of dicts, each dict must
contain the complete surface info (such as you get from
:func:`mne.make_bem_model`). True is an alias of 'outer_skull'.
The subjects bem and bem/flash folders are searched for the 'surf'
files. Defaults to False.
src : instance of SourceSpaces | None
If not None, also plot the source space points.
.. versionadded:: 0.14
mri_fiducials : bool | str
Plot MRI fiducials (default False). If ``True``, look for a file with
the canonical name (``bem/{subject}-fiducials.fif``). If ``str`` it
should provide the full path to the fiducials file.
.. versionadded:: 0.14
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
fig : instance of mlab.Figure
The mayavi figure.
"""
from ..forward import _create_meg_coils
mlab = _import_mlab()
if meg_sensors is False: # old behavior
meg_sensors = 'helmet'
elif meg_sensors is True:
meg_sensors = ['helmet', 'sensors']
if eeg_sensors is False:
eeg_sensors = []
elif eeg_sensors is True:
eeg_sensors = 'original'
if isinstance(eeg_sensors, string_types):
eeg_sensors = [eeg_sensors]
if isinstance(meg_sensors, string_types):
meg_sensors = [meg_sensors]
for kind, var in zip(('eeg', 'meg'), (eeg_sensors, meg_sensors)):
if not isinstance(var, (list, tuple)) or \
not all(isinstance(x, string_types) for x in var):
            raise TypeError('%s_sensors must be list or tuple of str, got %s'
                            % (kind, type(var)))
if not all(x in ('helmet', 'sensors') for x in meg_sensors):
        raise ValueError('meg_sensors must only contain "helmet" and '
                         '"sensors", got %s' % (meg_sensors,))
if not all(x in ('original', 'projected') for x in eeg_sensors):
raise ValueError('eeg_sensors must only contain "original" and '
'"projected", got %s' % (eeg_sensors,))
if not isinstance(info, Info):
raise TypeError('info must be an instance of Info, got %s'
% type(info))
valid_coords = ['head', 'meg', 'mri']
if coord_frame not in valid_coords:
raise ValueError('coord_frame must be one of %s' % (valid_coords,))
if src is not None:
if not isinstance(src, SourceSpaces):
raise TypeError('src must be None or SourceSpaces, got %s'
% (type(src),))
src_subject = src[0].get('subject_his_id', None)
subject = src_subject if subject is None else subject
if src_subject is not None and subject != src_subject:
raise ValueError('subject ("%s") did not match the subject name '
' in src ("%s")' % (subject, src_subject))
src_rr = np.concatenate([s['rr'][s['inuse'].astype(bool)]
for s in src])
src_nn = np.concatenate([s['nn'][s['inuse'].astype(bool)]
for s in src])
else:
src_rr = src_nn = np.empty((0, 3))
meg_picks = pick_types(info, meg=True, ref_meg=ref_meg)
eeg_picks = pick_types(info, meg=False, eeg=True, ref_meg=False)
ecog_picks = pick_types(info, meg=False, ecog=True, ref_meg=False)
if head is None:
head = (len(ecog_picks) == 0 and subject is not None)
if head and subject is None:
raise ValueError('If head is True, subject must be provided')
if isinstance(trans, string_types):
if trans == 'auto':
# let's try to do this in MRI coordinates so they're easy to plot
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
trans = _find_trans(subject, subjects_dir)
        transforms = read_trans(trans, return_all=True)
        for trans in transforms:  # we got at least 1
try:
trans = _ensure_trans(trans, 'head', 'mri')
except Exception as exp:
pass
else:
break
else:
raise exp
elif trans is None:
trans = Transform('head', 'mri')
elif not isinstance(trans, dict):
raise TypeError('trans must be str, dict, or None')
head_mri_t = _ensure_trans(trans, 'head', 'mri')
dev_head_t = info['dev_head_t']
del trans
# Figure out our transformations
if coord_frame == 'meg':
head_trans = invert_transform(dev_head_t)
meg_trans = Transform('meg', 'meg')
mri_trans = invert_transform(combine_transforms(
dev_head_t, head_mri_t, 'meg', 'mri'))
elif coord_frame == 'mri':
head_trans = head_mri_t
meg_trans = combine_transforms(dev_head_t, head_mri_t, 'meg', 'mri')
mri_trans = Transform('mri', 'mri')
else: # coord_frame == 'head'
head_trans = Transform('head', 'head')
meg_trans = info['dev_head_t']
mri_trans = invert_transform(head_mri_t)
# both the head and helmet will be in MRI coordinates after this
surfs = dict()
if head:
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
head_surf = _get_head_surface(subject, source=source,
subjects_dir=subjects_dir,
raise_error=False)
if head_surf is None:
if isinstance(source, string_types):
source = [source]
for this_surf in source:
if not this_surf.endswith('outer_skin'):
continue
surf_fname = op.join(subjects_dir, subject, 'bem', 'flash',
'%s.surf' % this_surf)
if not op.exists(surf_fname):
surf_fname = op.join(subjects_dir, subject, 'bem',
'%s.surf' % this_surf)
if not op.exists(surf_fname):
continue
logger.info('Using %s for head surface.' % this_surf)
rr, tris = read_surface(surf_fname)
head_surf = dict(rr=rr / 1000., tris=tris, ntri=len(tris),
np=len(rr), coord_frame=FIFF.FIFFV_COORD_MRI)
complete_surface_info(head_surf, copy=False, verbose=False)
break
if head_surf is None:
raise IOError('No head surface found for subject %s.' % subject)
surfs['head'] = head_surf
if mri_fiducials:
if mri_fiducials is True:
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if subject is None:
raise ValueError("Subject needs to be specified to "
"automatically find the fiducials file.")
mri_fiducials = op.join(subjects_dir, subject, 'bem',
subject + '-fiducials.fif')
if isinstance(mri_fiducials, string_types):
mri_fiducials, cf = read_fiducials(mri_fiducials)
if cf != FIFF.FIFFV_COORD_MRI:
raise ValueError("Fiducials are not in MRI space")
fid_loc = _fiducial_coords(mri_fiducials, FIFF.FIFFV_COORD_MRI)
fid_loc = apply_trans(mri_trans, fid_loc)
else:
fid_loc = []
if 'helmet' in meg_sensors and len(meg_picks) > 0:
surfs['helmet'] = get_meg_helmet_surf(info, head_mri_t)
if brain is None:
if len(ecog_picks) > 0 and subject is not None:
brain = 'pial'
else:
brain = False
if brain:
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
brain = 'pial' if brain is True else brain
for hemi in ['lh', 'rh']:
fname = op.join(subjects_dir, subject, 'surf',
'%s.%s' % (hemi, brain))
rr, tris = read_surface(fname)
rr *= 1e-3
surfs[hemi] = dict(rr=rr, tris=tris, ntri=len(tris), np=len(rr),
coord_frame=FIFF.FIFFV_COORD_MRI)
complete_surface_info(surfs[hemi], copy=False, verbose=False)
if skull is True:
skull = 'outer_skull'
if isinstance(skull, string_types):
skull = [skull]
elif not skull:
skull = []
if len(skull) > 0 and not isinstance(skull[0], dict):
skull = sorted(skull)
skull_alpha = dict()
skull_colors = dict()
hemi_val = 0.5
if src is None or (brain and any(s['type'] == 'surf' for s in src)):
hemi_val = 1.
alphas = (4 - np.arange(len(skull) + 1)) * (0.5 / 4.)
for idx, this_skull in enumerate(skull):
if isinstance(this_skull, dict):
from ..bem import _surf_name
skull_surf = this_skull
this_skull = _surf_name[skull_surf['id']]
else:
skull_fname = op.join(subjects_dir, subject, 'bem', 'flash',
'%s.surf' % this_skull)
if not op.exists(skull_fname):
skull_fname = op.join(subjects_dir, subject, 'bem',
'%s.surf' % this_skull)
if not op.exists(skull_fname):
raise IOError('No skull surface %s found for subject %s.'
% (this_skull, subject))
            logger.info('Using %s for skull surface.' % skull_fname)
rr, tris = read_surface(skull_fname)
skull_surf = dict(rr=rr / 1000., tris=tris, ntri=len(tris),
np=len(rr), coord_frame=FIFF.FIFFV_COORD_MRI)
complete_surface_info(skull_surf, copy=False, verbose=False)
skull_alpha[this_skull] = alphas[idx + 1]
skull_colors[this_skull] = (0.95 - idx * 0.2, 0.85, 0.95 - idx * 0.2)
surfs[this_skull] = skull_surf
if src is None and brain is False and len(skull) == 0:
head_alpha = 1.0
else:
head_alpha = alphas[0]
for key in surfs.keys():
surfs[key] = transform_surface_to(surfs[key], coord_frame, mri_trans)
src_rr = apply_trans(mri_trans, src_rr)
src_nn = apply_trans(mri_trans, src_nn, move=False)
# determine points
meg_rrs, meg_tris = list(), list()
ecog_loc = list()
hpi_loc = list()
ext_loc = list()
car_loc = list()
eeg_loc = list()
eegp_loc = list()
if len(eeg_sensors) > 0:
eeg_loc = np.array([info['chs'][k]['loc'][:3] for k in eeg_picks])
if len(eeg_loc) > 0:
eeg_loc = apply_trans(head_trans, eeg_loc)
# XXX do projections here if necessary
if 'projected' in eeg_sensors:
eegp_loc, eegp_nn = _project_onto_surface(
eeg_loc, surfs['head'], project_rrs=True,
return_nn=True)[2:4]
if 'original' not in eeg_sensors:
eeg_loc = list()
del eeg_sensors
if 'sensors' in meg_sensors:
coil_transs = [_loc_to_coil_trans(info['chs'][pick]['loc'])
for pick in meg_picks]
coils = _create_meg_coils([info['chs'][pick] for pick in meg_picks],
acc='normal')
offset = 0
for coil, coil_trans in zip(coils, coil_transs):
rrs, tris = _sensor_shape(coil)
rrs = apply_trans(coil_trans, rrs)
meg_rrs.append(rrs)
meg_tris.append(tris + offset)
offset += len(meg_rrs[-1])
if len(meg_rrs) == 0:
            warn('MEG sensors not found. Cannot plot MEG locations.')
else:
meg_rrs = apply_trans(meg_trans, np.concatenate(meg_rrs, axis=0))
meg_tris = np.concatenate(meg_tris, axis=0)
del meg_sensors
if dig:
if dig == 'fiducials':
hpi_loc = ext_loc = []
elif dig is not True:
raise ValueError("dig needs to be True, False or 'fiducials', "
"not %s" % repr(dig))
else:
hpi_loc = np.array([d['r'] for d in info['dig']
if d['kind'] == FIFF.FIFFV_POINT_HPI])
ext_loc = np.array([d['r'] for d in info['dig']
if d['kind'] == FIFF.FIFFV_POINT_EXTRA])
car_loc = _fiducial_coords(info['dig'])
# Transform from head coords if necessary
if coord_frame == 'meg':
for loc in (hpi_loc, ext_loc, car_loc):
loc[:] = apply_trans(invert_transform(info['dev_head_t']), loc)
elif coord_frame == 'mri':
for loc in (hpi_loc, ext_loc, car_loc):
loc[:] = apply_trans(head_mri_t, loc)
if len(car_loc) == len(ext_loc) == len(hpi_loc) == 0:
warn('Digitization points not found. Cannot plot digitization.')
del dig
if len(ecog_picks) > 0 and ecog_sensors:
ecog_loc = np.array([info['chs'][pick]['loc'][:3]
for pick in ecog_picks])
# initialize figure
fig = mlab.figure(bgcolor=(0.0, 0.0, 0.0), size=(600, 600))
_toggle_mlab_render(fig, False)
# plot surfaces
alphas = dict(head=head_alpha, helmet=0.5, lh=hemi_val, rh=hemi_val)
alphas.update(skull_alpha)
colors = dict(head=(0.6,) * 3, helmet=(0.0, 0.0, 0.6), lh=(0.5,) * 3,
rh=(0.5,) * 3)
colors.update(skull_colors)
for key, surf in surfs.items():
# Make a solid surface
mesh = _create_mesh_surf(surf, fig)
with warnings.catch_warnings(record=True): # traits
surface = mlab.pipeline.surface(mesh, color=colors[key],
opacity=alphas[key], figure=fig)
if key != 'helmet':
surface.actor.property.backface_culling = True
# plot points
defaults = DEFAULTS['coreg']
datas = [eeg_loc,
hpi_loc,
ext_loc, ecog_loc]
colors = [defaults['eeg_color'],
defaults['hpi_color'],
defaults['extra_color'], defaults['ecog_color']]
alphas = [0.8,
0.5,
0.25, 0.8]
scales = [defaults['eeg_scale'],
defaults['hpi_scale'],
defaults['extra_scale'], defaults['ecog_scale']]
for kind, loc in (('dig', car_loc), ('mri', fid_loc)):
if len(loc) > 0:
datas.extend(loc[:, np.newaxis])
colors.extend((defaults['lpa_color'],
defaults['nasion_color'],
defaults['rpa_color']))
alphas.extend(3 * (defaults[kind + '_fid_opacity'],))
scales.extend(3 * (defaults[kind + '_fid_scale'],))
for data, color, alpha, scale in zip(datas, colors, alphas, scales):
if len(data) > 0:
with warnings.catch_warnings(record=True): # traits
points = mlab.points3d(data[:, 0], data[:, 1], data[:, 2],
color=color, scale_factor=scale,
opacity=alpha, figure=fig)
points.actor.property.backface_culling = True
if len(eegp_loc) > 0:
with warnings.catch_warnings(record=True): # traits
quiv = mlab.quiver3d(
eegp_loc[:, 0], eegp_loc[:, 1], eegp_loc[:, 2],
eegp_nn[:, 0], eegp_nn[:, 1], eegp_nn[:, 2],
color=defaults['eegp_color'], mode='cylinder',
scale_factor=defaults['eegp_scale'], opacity=0.6, figure=fig)
quiv.glyph.glyph_source.glyph_source.height = defaults['eegp_height']
quiv.glyph.glyph_source.glyph_source.center = \
(0., -defaults['eegp_height'], 0)
quiv.glyph.glyph_source.glyph_source.resolution = 20
quiv.actor.property.backface_culling = True
if len(meg_rrs) > 0:
color, alpha = (0., 0.25, 0.5), 0.25
surf = dict(rr=meg_rrs, tris=meg_tris)
complete_surface_info(surf, copy=False, verbose=False)
mesh = _create_mesh_surf(surf, fig)
with warnings.catch_warnings(record=True): # traits
surface = mlab.pipeline.surface(mesh, color=color,
opacity=alpha, figure=fig)
# Don't cull these backfaces
if len(src_rr) > 0:
with warnings.catch_warnings(record=True): # traits
quiv = mlab.quiver3d(
src_rr[:, 0], src_rr[:, 1], src_rr[:, 2],
src_nn[:, 0], src_nn[:, 1], src_nn[:, 2], color=(1., 1., 0.),
mode='cylinder', scale_factor=3e-3, opacity=0.75, figure=fig)
quiv.glyph.glyph_source.glyph_source.height = 0.25
quiv.glyph.glyph_source.glyph_source.center = (0., 0., 0.)
quiv.glyph.glyph_source.glyph_source.resolution = 20
quiv.actor.property.backface_culling = True
with SilenceStdout():
mlab.view(90, 90, figure=fig)
_toggle_mlab_render(fig, True)
return fig
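# Example (illustrative sketch, not part of the original module): a minimal
# call of the function above, assuming it is the public ``plot_trans``
# referenced in ``snapshot_brain_montage`` later in this file. The file path
# is hypothetical.
def _demo_plot_trans():
    """Sketch of a typical call, plotting sensors in MRI coordinates."""
    from mne.io import read_info
    info = read_info('sample_audvis_raw.fif')  # hypothetical path
    return plot_trans(info, trans='auto', subject='sample',
                      coord_frame='mri', meg_sensors=True)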
def _make_tris_fan(n_vert):
"""Make tris given a number of vertices of a circle-like obj."""
tris = np.zeros((n_vert - 2, 3), int)
tris[:, 2] = np.arange(2, n_vert)
tris[:, 1] = tris[:, 2] - 1
return tris
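# Example (illustrative sketch): a fan over five vertices produces three
# triangles that all share vertex 0, which _sensor_shape below uses as the
# center point of its triangulations.
def _demo_make_tris_fan():
    """Sketch of _make_tris_fan output for five vertices."""
    tris = _make_tris_fan(5)
    assert tris.tolist() == [[0, 1, 2], [0, 2, 3], [0, 3, 4]]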
def _sensor_shape(coil):
"""Get the sensor shape vertices."""
rrs = np.empty([0, 2])
tris = np.empty([0, 3], int)
id_ = coil['type'] & 0xFFFF
if id_ in (2, 3012, 3013, 3011):
# square figure eight
# wound by right hand rule such that +x side is "up" (+z)
long_side = coil['size'] # length of long side (meters)
offset = 0.0025 # offset of the center portion of planar grad coil
rrs = np.array([
[offset, -long_side / 2.],
[long_side / 2., -long_side / 2.],
[long_side / 2., long_side / 2.],
[offset, long_side / 2.],
[-offset, -long_side / 2.],
[-long_side / 2., -long_side / 2.],
[-long_side / 2., long_side / 2.],
[-offset, long_side / 2.]])
tris = np.concatenate((_make_tris_fan(4),
_make_tris_fan(4) + 4), axis=0)
elif id_ in (2000, 3022, 3023, 3024):
# square magnetometer (potentially point-type)
size = 0.001 if id_ == 2000 else (coil['size'] / 2.)
rrs = np.array([[-1., 1.], [1., 1.], [1., -1.], [-1., -1.]]) * size
tris = _make_tris_fan(4)
elif id_ in (4001, 4003, 5002, 7002, 7003,
FIFF.FIFFV_COIL_ARTEMIS123_REF_MAG):
# round magnetometer
n_pts = 15 # number of points for circle
circle = np.exp(2j * np.pi * np.arange(n_pts) / float(n_pts))
circle = np.concatenate(([0.], circle))
circle *= coil['size'] / 2. # radius of coil
rrs = np.array([circle.real, circle.imag]).T
tris = _make_tris_fan(n_pts + 1)
elif id_ in (4002, 5001, 5003, 5004, 4004, 4005, 6001, 7001,
FIFF.FIFFV_COIL_ARTEMIS123_GRAD,
FIFF.FIFFV_COIL_ARTEMIS123_REF_GRAD):
# round coil 1st order (off-diagonal) gradiometer
baseline = coil['base'] if id_ in (5004, 4005) else 0.
n_pts = 16 # number of points for circle
# This time, go all the way around circle to close it fully
circle = np.exp(2j * np.pi * np.arange(-1, n_pts) / float(n_pts - 1))
circle[0] = 0 # center pt for triangulation
circle *= coil['size'] / 2.
rrs = np.array([ # first, second coil
np.concatenate([circle.real + baseline / 2.,
circle.real - baseline / 2.]),
np.concatenate([circle.imag, -circle.imag])]).T
tris = np.concatenate([_make_tris_fan(n_pts + 1),
_make_tris_fan(n_pts + 1) + n_pts + 1])
# Go from (x,y) -> (x,y,z)
rrs = np.pad(rrs, ((0, 0), (0, 1)), mode='constant')
return rrs, tris
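# Example (hypothetical coil dict, illustrative only): a point-type
# magnetometer (id 2000) yields a small square drawn as a two-triangle fan,
# padded to 3-D coordinates.
def _demo_sensor_shape():
    """Sketch of _sensor_shape output for a point-type magnetometer."""
    coil = dict(type=2000, size=0.01)  # size is unused for id 2000
    rrs, tris = _sensor_shape(coil)
    assert rrs.shape == (4, 3) and tris.shape == (2, 3)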
def _limits_to_control_points(clim, stc_data, colormap):
"""Convert limits (values or percentiles) to control points.
Note: If using 'mne', generate cmap control points for a directly
mirrored cmap for simplicity (i.e., no normalization is computed to account
for a 2-tailed mne cmap).
Parameters
----------
    clim : str | dict
        Desired limits used to set cmap control points.
Returns
-------
    ctrl_pts : array of float, shape (3,)
        Array of floats corresponding to the values to use as cmap control
        points.
colormap : str
The colormap.
"""
# Based on type of limits specified, get cmap control points
if colormap == 'auto':
if clim == 'auto':
colormap = 'mne' if (stc_data < 0).any() else 'hot'
else:
if 'lims' in clim:
colormap = 'hot'
else: # 'pos_lims' in clim
colormap = 'mne'
    if clim == 'auto':
        # Set the control points at robust upper percentiles of the
        # absolute data
        ctrl_pts = np.percentile(np.abs(stc_data), [96, 97.5, 99.95])
elif isinstance(clim, dict):
# Get appropriate key for clim if it's a dict
limit_key = ['lims', 'pos_lims'][colormap in ('mne', 'mne_analyze')]
if colormap != 'auto' and limit_key not in clim.keys():
raise KeyError('"pos_lims" must be used with "mne" colormap')
clim['kind'] = clim.get('kind', 'percent')
if clim['kind'] == 'percent':
ctrl_pts = np.percentile(np.abs(stc_data),
list(np.abs(clim[limit_key])))
elif clim['kind'] == 'value':
ctrl_pts = np.array(clim[limit_key])
if (np.diff(ctrl_pts) < 0).any():
                raise ValueError('value colormap limits must be '
                                 'nondecreasing')
else:
            raise ValueError('If clim is a dict, clim["kind"] must be '
                             '"value" or "percent"')
else:
raise ValueError('"clim" must be "auto" or dict')
if len(ctrl_pts) != 3:
raise ValueError('"lims" or "pos_lims" is length %i. It must be length'
' 3' % len(ctrl_pts))
ctrl_pts = np.array(ctrl_pts, float)
if len(set(ctrl_pts)) != 3:
if len(set(ctrl_pts)) == 1: # three points match
if ctrl_pts[0] == 0: # all are zero
warn('All data were zero')
ctrl_pts = np.arange(3, dtype=float)
else:
ctrl_pts *= [0., 0.5, 1] # all nonzero pts == max
else: # two points match
# if points one and two are identical, add a tiny bit to the
# control point two; if points two and three are identical,
# subtract a tiny bit from point two.
bump = 1e-5 if ctrl_pts[0] == ctrl_pts[1] else -1e-5
ctrl_pts[1] = ctrl_pts[0] + bump * (ctrl_pts[2] - ctrl_pts[0])
return ctrl_pts, colormap
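# Example (illustrative sketch): explicit 'value' limits pass through
# unchanged, and the presence of 'lims' resolves an 'auto' colormap to 'hot'.
def _demo_limits_to_control_points():
    """Sketch of _limits_to_control_points with explicit value limits."""
    clim = dict(kind='value', lims=[2., 5., 9.])
    ctrl_pts, colormap = _limits_to_control_points(clim, np.arange(10.),
                                                   'auto')
    assert colormap == 'hot'
    assert list(ctrl_pts) == [2., 5., 9.]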
def _handle_time(time_label, time_unit, times):
"""Handle time label string and units."""
if time_label == 'auto':
if time_unit == 's':
time_label = 'time=%0.3fs'
elif time_unit == 'ms':
time_label = 'time=%0.1fms'
if time_unit == 's':
times = times
elif time_unit == 'ms':
times = 1e3 * times
else:
raise ValueError("time_unit needs to be 's' or 'ms', got %r" %
(time_unit,))
return time_label, times
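# Example (illustrative sketch): 'auto' picks a unit-specific label format,
# and times are rescaled when milliseconds are requested.
def _demo_handle_time():
    """Sketch of _handle_time behaviour for the 'ms' unit."""
    label, times = _handle_time('auto', 'ms', np.array([0.1, 0.2]))
    assert label == 'time=%0.1fms'
    assert np.allclose(times, [100., 200.])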
def _key_pressed_slider(event, params):
"""Handle key presses for time_viewer slider."""
step = 1
if event.key.startswith('ctrl'):
step = 5
event.key = event.key.split('+')[-1]
if event.key not in ['left', 'right']:
return
time_viewer = event.canvas.figure
value = time_viewer.slider.val
times = params['stc'].times
if params['time_unit'] == 'ms':
times = times * 1000.
time_idx = np.argmin(np.abs(times - value))
if event.key == 'left':
time_idx = np.max((0, time_idx - step))
elif event.key == 'right':
time_idx = np.min((len(times) - 1, time_idx + step))
this_time = times[time_idx]
time_viewer.slider.set_val(this_time)
def _smooth_plot(this_time, params):
"""Smooth source estimate data and plot with mpl."""
from ..source_estimate import _morph_buffer
from mpl_toolkits.mplot3d import art3d
ax = params['ax']
stc = params['stc']
ax.clear()
times = stc.times
scaler = 1000. if params['time_unit'] == 'ms' else 1.
if this_time is None:
time_idx = 0
else:
time_idx = np.argmin(np.abs(times - this_time / scaler))
if params['hemi_idx'] == 0:
data = stc.data[:len(stc.vertices[0]), time_idx:time_idx + 1]
else:
data = stc.data[len(stc.vertices[0]):, time_idx:time_idx + 1]
array_plot = _morph_buffer(data, params['vertices'], params['e'],
params['smoothing_steps'], params['n_verts'],
params['inuse'], params['maps'])
vmax = np.max(array_plot)
colors = array_plot / vmax
transp = 0.8
faces = params['faces']
greymap = params['greymap']
cmap = params['cmap']
polyc = ax.plot_trisurf(*params['coords'].T, triangles=faces,
antialiased=False)
color_ave = np.mean(colors[faces], axis=1).flatten()
curv_ave = np.mean(params['curv'][faces], axis=1).flatten()
facecolors = art3d.PolyCollection.get_facecolors(polyc)
to_blend = color_ave > params['ctrl_pts'][0] / vmax
facecolors[to_blend, :3] = ((1 - transp) *
greymap(curv_ave[to_blend])[:, :3] +
transp * cmap(color_ave[to_blend])[:, :3])
facecolors[~to_blend, :3] = greymap(curv_ave[~to_blend])[:, :3]
ax.set_title(params['time_label'] % (times[time_idx] * scaler), color='w')
ax.set_aspect('equal')
ax.axis('off')
ax.set_xlim(-80, 80)
ax.set_ylim(-80, 80)
ax.set_zlim(-80, 80)
ax.figure.canvas.draw()
def _plot_mpl_stc(stc, subject=None, surface='inflated', hemi='lh',
colormap='auto', time_label='auto', smoothing_steps=10,
subjects_dir=None, views='lat', clim='auto', figure=None,
initial_time=None, time_unit='s', background='black',
spacing='oct6', time_viewer=False):
"""Plot source estimate using mpl."""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.widgets import Slider
import nibabel as nib
from scipy import sparse, stats
from ..source_estimate import _get_subject_sphere_tris
if hemi not in ['lh', 'rh']:
raise ValueError("hemi must be 'lh' or 'rh' when using matplotlib. "
"Got %s." % hemi)
kwargs = {'lat': {'elev': 5, 'azim': 0},
'med': {'elev': 5, 'azim': 180},
'fos': {'elev': 5, 'azim': 90},
'cau': {'elev': 5, 'azim': -90},
'dor': {'elev': 90, 'azim': 0},
'ven': {'elev': -90, 'azim': 0},
'fro': {'elev': 5, 'azim': 110},
'par': {'elev': 5, 'azim': -110}}
if views not in kwargs:
raise ValueError("views must be one of ['lat', 'med', 'fos', 'cau', "
"'dor' 'ven', 'fro', 'par']. Got %s." % views)
ctrl_pts, colormap = _limits_to_control_points(clim, stc.data, colormap)
if colormap == 'auto':
colormap = mne_analyze_colormap(clim, format='matplotlib')
time_label, times = _handle_time(time_label, time_unit, stc.times)
fig = plt.figure(figsize=(6, 6)) if figure is None else figure
ax = Axes3D(fig)
hemi_idx = 0 if hemi == 'lh' else 1
surf = op.join(subjects_dir, subject, 'surf', '%s.%s' % (hemi, surface))
if spacing == 'all':
coords, faces = nib.freesurfer.read_geometry(surf)
inuse = slice(None)
else:
stype, sval, ico_surf, src_type_str = _check_spacing(spacing)
surf = _create_surf_spacing(surf, hemi, subject, stype, ico_surf,
subjects_dir)
inuse = surf['vertno']
faces = surf['use_tris']
coords = surf['rr'][inuse]
shape = faces.shape
faces = stats.rankdata(faces, 'dense').reshape(shape) - 1
del surf
vertices = stc.vertices[hemi_idx]
n_verts = len(vertices)
tris = _get_subject_sphere_tris(subject, subjects_dir)[hemi_idx]
e = mesh_edges(tris)
e.data[e.data == 2] = 1
n_vertices = e.shape[0]
maps = sparse.identity(n_vertices).tocsr()
e = e + sparse.eye(n_vertices, n_vertices)
cmap = cm.get_cmap(colormap)
greymap = cm.get_cmap('Greys')
    curv = nib.freesurfer.read_morph_data(
        op.join(subjects_dir, subject, 'surf', '%s.curv' % hemi))[inuse]
    # binarize the curvature into two gray levels for background shading
    curv = np.clip(np.array(curv > 0, np.int), 0.2, 0.8)
params = dict(ax=ax, stc=stc, coords=coords, faces=faces,
hemi_idx=hemi_idx, vertices=vertices, e=e,
smoothing_steps=smoothing_steps, n_verts=n_verts,
inuse=inuse, maps=maps, cmap=cmap, curv=curv,
ctrl_pts=ctrl_pts, greymap=greymap, time_label=time_label,
time_unit=time_unit)
_smooth_plot(initial_time, params)
ax.view_init(**kwargs[views])
try:
ax.set_facecolor(background)
except AttributeError:
ax.set_axis_bgcolor(background)
if time_viewer:
time_viewer = figure_nobar(figsize=(4.5, .25))
fig.time_viewer = time_viewer
ax_time = plt.axes()
if initial_time is None:
initial_time = 0
slider = Slider(ax=ax_time, label='Time', valmin=times[0],
valmax=times[-1], valinit=initial_time,
valfmt=time_label)
time_viewer.slider = slider
callback_slider = partial(_smooth_plot, params=params)
slider.on_changed(callback_slider)
callback_key = partial(_key_pressed_slider, params=params)
time_viewer.canvas.mpl_connect('key_press_event', callback_key)
time_viewer.subplots_adjust(left=0.12, bottom=0.05, right=0.75,
top=0.95)
fig.subplots_adjust(left=0., bottom=0., right=1., top=1.)
plt.show()
return fig
def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
colormap='auto', time_label='auto',
smoothing_steps=10, transparent=None, alpha=1.0,
time_viewer=False, subjects_dir=None, figure=None,
views='lat', colorbar=True, clim='auto',
cortex="classic", size=800, background="black",
foreground="white", initial_time=None,
time_unit='s', backend='auto', spacing='oct6'):
"""Plot SourceEstimates with PySurfer.
Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
which will automatically be set by this function. Plotting multiple
SourceEstimates with different values for subjects_dir will cause
PySurfer to use the wrong FreeSurfer surfaces when using methods of
the returned Brain object. It is therefore recommended to set the
SUBJECTS_DIR environment variable or always use the same value for
subjects_dir (within the same Python session).
By default this function uses Mayavi to plot the source estimates. If
Mayavi is not installed, the plotting is done with matplotlib (much slower,
decimated source space by default).
Parameters
----------
stc : SourceEstimates
The source estimates to plot.
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT. If None stc.subject will be used. If that
is None, the environment will be used.
surface : str
The type of surface (inflated, white etc.).
hemi : str, 'lh' | 'rh' | 'split' | 'both'
The hemisphere to display.
colormap : str | np.ndarray of float, shape(n_colors, 3 | 4)
Name of colormap to use or a custom look up table. If array, must
be (n x 3) or (n x 4) array for with RGB or RGBA values between
0 and 255. If 'auto', either 'hot' or 'mne' will be chosen
based on whether 'lims' or 'pos_lims' are specified in `clim`.
    time_label : str | callable | None
        Format of the time label (a format string, a function that maps
        floating point time values to strings, or None for no label). The
        default depends on ``time_unit``: ``'time=%0.3fs'`` for seconds and
        ``'time=%0.1fms'`` for milliseconds.
    smoothing_steps : int
        The number of smoothing steps to apply to the source data.
transparent : bool | None
If True, use a linear transparency between fmin and fmid.
None will choose automatically based on colormap type. Has no effect
with mpl backend.
alpha : float
Alpha value to apply globally to the overlay. Has no effect with mpl
backend.
time_viewer : bool
Display time viewer GUI.
subjects_dir : str
The path to the freesurfer subjects reconstructions.
It corresponds to Freesurfer environment variable SUBJECTS_DIR.
figure : instance of mayavi.core.scene.Scene | instance of matplotlib.figure.Figure | list | int | None
If None, a new figure will be created. If multiple views or a
split view is requested, this must be a list of the appropriate
length. If int is provided it will be used to identify the Mayavi
        figure by its id or create a new figure with the given id. If an
instance of matplotlib figure, mpl backend is used for plotting.
views : str | list
        View to use. See surfer.Brain(). Supported views: ['lat', 'med',
        'fos', 'cau', 'dor', 'ven', 'fro', 'par']. Using multiple views is
        not supported for the mpl backend.
colorbar : bool
If True, display colorbar on scene. Not available on mpl backend.
clim : str | dict
Colorbar properties specification. If 'auto', set clim automatically
based on data percentiles. If dict, should contain:
``kind`` : str
Flag to specify type of limits. 'value' or 'percent'.
``lims`` : list | np.ndarray | tuple of float, 3 elements
Note: Only use this if 'colormap' is not 'mne'.
Left, middle, and right bound for colormap.
``pos_lims`` : list | np.ndarray | tuple of float, 3 elements
Note: Only use this if 'colormap' is 'mne'.
Left, middle, and right bound for colormap. Positive values
will be mirrored directly across zero during colormap
construction to obtain negative control points.
cortex : str or tuple
Specifies how binarized curvature values are rendered.
Either the name of a preset PySurfer cortex colorscheme (one of
'classic', 'bone', 'low_contrast', or 'high_contrast'), or the name of
mayavi colormap, or a tuple with values (colormap, min, max, reverse)
to fully specify the curvature colors. Has no effect with mpl backend.
size : float or pair of floats
        The size of the window, in pixels. Can be one number to specify
a square window, or the (width, height) of a rectangular window.
Has no effect with mpl backend.
background : matplotlib color
Color of the background of the display window.
foreground : matplotlib color
Color of the foreground of the display window. Has no effect with mpl
backend.
initial_time : float | None
The time to display on the plot initially. ``None`` to display the
first time sample (default).
time_unit : 's' | 'ms'
Whether time is represented in seconds ("s", default) or
milliseconds ("ms").
backend : 'auto' | 'mayavi' | 'matplotlib'
Which backend to use. If ``'auto'`` (default), tries to plot with
mayavi, but resorts to matplotlib if mayavi is not available.
.. versionadded:: 0.15.0
spacing : str
The spacing to use for the source space. Can be ``'ico#'`` for a
recursively subdivided icosahedron, ``'oct#'`` for a recursively
subdivided octahedron, or ``'all'`` for all points. In general, you can
speed up the plotting by selecting a sparser source space. Has no
effect with mayavi backend. Defaults to 'oct6'.
.. versionadded:: 0.15.0
Returns
-------
figure : surfer.viz.Brain | matplotlib.figure.Figure
An instance of surfer.viz.Brain from PySurfer or matplotlib figure.
""" # noqa: E501
# import here to avoid circular import problem
from ..source_estimate import SourceEstimate
if not isinstance(stc, SourceEstimate):
raise ValueError('stc has to be a surface source estimate')
subjects_dir = get_subjects_dir(subjects_dir=subjects_dir,
raise_error=True)
subject = _check_subject(stc.subject, subject, True)
if backend not in ['auto', 'matplotlib', 'mayavi']:
raise ValueError("backend must be 'auto', 'mayavi' or 'matplotlib'. "
"Got %s." % backend)
plot_mpl = backend == 'matplotlib'
if not plot_mpl:
try:
import mayavi
except ImportError:
if backend == 'auto':
warn('Mayavi not found. Resorting to matplotlib 3d.')
plot_mpl = True
else: # 'mayavi'
raise
if plot_mpl:
return _plot_mpl_stc(stc, subject=subject, surface=surface, hemi=hemi,
colormap=colormap, time_label=time_label,
smoothing_steps=smoothing_steps,
subjects_dir=subjects_dir, views=views, clim=clim,
figure=figure, initial_time=initial_time,
time_unit=time_unit, background=background,
spacing=spacing, time_viewer=time_viewer)
import surfer
from surfer import Brain, TimeViewer
surfer_version = LooseVersion(surfer.__version__)
v06 = LooseVersion('0.6')
if surfer_version < v06:
raise ImportError("This function requires PySurfer 0.6 (you are "
"running version %s). You can update PySurfer "
"using:\n\n $ pip install -U pysurfer" %
surfer.__version__)
if initial_time is not None and surfer_version > v06:
kwargs = {'initial_time': initial_time}
initial_time = None # don't set it twice
else:
kwargs = {}
if hemi not in ['lh', 'rh', 'split', 'both']:
raise ValueError('hemi has to be either "lh", "rh", "split", '
'or "both"')
# check `figure` parameter (This will be performed by PySurfer > 0.6)
if figure is not None:
if isinstance(figure, int):
# use figure with specified id
size_ = size if isinstance(size, (tuple, list)) else (size, size)
figure = [mayavi.mlab.figure(figure, size=size_)]
elif not isinstance(figure, (list, tuple)):
figure = [figure]
if not all(isinstance(f, mayavi.core.scene.Scene) for f in figure):
raise TypeError('figure must be a mayavi scene or list of scenes')
time_label, times = _handle_time(time_label, time_unit, stc.times)
# convert control points to locations in colormap
ctrl_pts, colormap = _limits_to_control_points(clim, stc.data, colormap)
# Construct cmap manually if 'mne' and get cmap bounds
# and triage transparent argument
if colormap in ('mne', 'mne_analyze'):
colormap = mne_analyze_colormap(ctrl_pts)
scale_pts = [-1 * ctrl_pts[-1], 0, ctrl_pts[-1]]
transparent = False if transparent is None else transparent
else:
scale_pts = ctrl_pts
transparent = True if transparent is None else transparent
if hemi in ['both', 'split']:
hemis = ['lh', 'rh']
else:
hemis = [hemi]
title = subject if len(hemis) > 1 else '%s - %s' % (subject, hemis[0])
with warnings.catch_warnings(record=True): # traits warnings
brain = Brain(subject, hemi=hemi, surf=surface, curv=True,
title=title, cortex=cortex, size=size,
background=background, foreground=foreground,
figure=figure, subjects_dir=subjects_dir,
views=views)
for hemi in hemis:
hemi_idx = 0 if hemi == 'lh' else 1
if hemi_idx == 0:
data = stc.data[:len(stc.vertices[0])]
else:
data = stc.data[len(stc.vertices[0]):]
vertices = stc.vertices[hemi_idx]
if len(data) > 0:
with warnings.catch_warnings(record=True): # traits warnings
brain.add_data(data, colormap=colormap, vertices=vertices,
smoothing_steps=smoothing_steps, time=times,
time_label=time_label, alpha=alpha, hemi=hemi,
colorbar=colorbar, **kwargs)
# scale colormap and set time (index) to display
brain.scale_data_colormap(fmin=scale_pts[0], fmid=scale_pts[1],
fmax=scale_pts[2], transparent=transparent)
if initial_time is not None:
brain.set_time(initial_time)
if time_viewer:
TimeViewer(brain)
return brain
def plot_sparse_source_estimates(src, stcs, colors=None, linewidth=2,
fontsize=18, bgcolor=(.05, 0, .1),
opacity=0.2, brain_color=(0.7,) * 3,
show=True, high_resolution=False,
fig_name=None, fig_number=None, labels=None,
modes=('cone', 'sphere'),
scale_factors=(1, 0.6),
verbose=None, **kwargs):
"""Plot source estimates obtained with sparse solver.
Active dipoles are represented in a "Glass" brain.
    If the same source is active in multiple source estimates, it is
    displayed with a sphere; otherwise it is displayed with a cone in 3D.
Parameters
----------
src : dict
The source space.
stcs : instance of SourceEstimate or list of instances of SourceEstimate
The source estimates (up to 3).
colors : list
List of colors
linewidth : int
Line width in 2D plot.
fontsize : int
Font size.
bgcolor : tuple of length 3
Background color in 3D.
opacity : float in [0, 1]
Opacity of brain mesh.
brain_color : tuple of length 3
Brain color.
show : bool
Show figures if True.
high_resolution : bool
If True, plot on the original (non-downsampled) cortical mesh.
    fig_name : str | None
        Mayavi figure name.
    fig_number : int | None
        Matplotlib figure number.
labels : ndarray or list of ndarrays
        Labels used to show sources in clusters. Sources with the same
        label and the waveforms within each cluster are presented in
        the same color. labels should be a list of ndarrays when
        stcs is a list, i.e. one label array for each stc.
modes : list
Should be a list, with each entry being ``'cone'`` or ``'sphere'``
to specify how the dipoles should be shown.
scale_factors : list
List of floating point scale factors for the markers.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
**kwargs : kwargs
Keyword arguments to pass to mlab.triangular_mesh.
Returns
-------
surface : instance of mlab Surface
The triangular mesh surface.
"""
mlab = _import_mlab()
import matplotlib.pyplot as plt
from matplotlib.colors import ColorConverter
known_modes = ['cone', 'sphere']
if not isinstance(modes, (list, tuple)) or \
not all(mode in known_modes for mode in modes):
        raise ValueError('modes must be a list containing only '
                         '"cone" or "sphere"')
if not isinstance(stcs, list):
stcs = [stcs]
if labels is not None and not isinstance(labels, list):
labels = [labels]
if colors is None:
colors = COLORS
linestyles = ['-', '--', ':']
# Show 3D
lh_points = src[0]['rr']
rh_points = src[1]['rr']
points = np.r_[lh_points, rh_points]
lh_normals = src[0]['nn']
rh_normals = src[1]['nn']
normals = np.r_[lh_normals, rh_normals]
if high_resolution:
use_lh_faces = src[0]['tris']
use_rh_faces = src[1]['tris']
else:
use_lh_faces = src[0]['use_tris']
use_rh_faces = src[1]['use_tris']
use_faces = np.r_[use_lh_faces, lh_points.shape[0] + use_rh_faces]
points *= 170
vertnos = [np.r_[stc.lh_vertno, lh_points.shape[0] + stc.rh_vertno]
for stc in stcs]
unique_vertnos = np.unique(np.concatenate(vertnos).ravel())
color_converter = ColorConverter()
f = mlab.figure(figure=fig_name, bgcolor=bgcolor, size=(600, 600))
mlab.clf()
_toggle_mlab_render(f, False)
with warnings.catch_warnings(record=True): # traits warnings
surface = mlab.triangular_mesh(points[:, 0], points[:, 1],
points[:, 2], use_faces,
color=brain_color,
opacity=opacity, **kwargs)
surface.actor.property.backface_culling = True
# Show time courses
fig = plt.figure(fig_number)
fig.clf()
ax = fig.add_subplot(111)
colors = cycle(colors)
logger.info("Total number of active sources: %d" % len(unique_vertnos))
if labels is not None:
colors = [advance_iterator(colors) for _ in
range(np.unique(np.concatenate(labels).ravel()).size)]
for idx, v in enumerate(unique_vertnos):
# get indices of stcs it belongs to
ind = [k for k, vertno in enumerate(vertnos) if v in vertno]
is_common = len(ind) > 1
if labels is None:
c = advance_iterator(colors)
else:
            # if vertex is in different stcs then take label from first one
c = colors[labels[ind[0]][vertnos[ind[0]] == v]]
mode = modes[1] if is_common else modes[0]
scale_factor = scale_factors[1] if is_common else scale_factors[0]
if (isinstance(scale_factor, (np.ndarray, list, tuple)) and
len(unique_vertnos) == len(scale_factor)):
scale_factor = scale_factor[idx]
x, y, z = points[v]
nx, ny, nz = normals[v]
with warnings.catch_warnings(record=True): # traits
mlab.quiver3d(x, y, z, nx, ny, nz, color=color_converter.to_rgb(c),
mode=mode, scale_factor=scale_factor)
for k in ind:
vertno = vertnos[k]
mask = (vertno == v)
assert np.sum(mask) == 1
linestyle = linestyles[k]
ax.plot(1e3 * stcs[k].times, 1e9 * stcs[k].data[mask].ravel(),
c=c, linewidth=linewidth, linestyle=linestyle)
    ax.set_xlabel('Time (ms)', fontsize=fontsize)
    ax.set_ylabel('Source amplitude (nAm)', fontsize=fontsize)
if fig_name is not None:
ax.set_title(fig_name)
plt_show(show)
surface.actor.property.backface_culling = True
surface.actor.property.shading = True
_toggle_mlab_render(f, True)
return surface
def _toggle_mlab_render(fig, render):
mlab = _import_mlab()
if mlab.options.backend != 'test':
fig.scene.disable_render = not render
def plot_dipole_locations(dipoles, trans, subject, subjects_dir=None,
mode='orthoview', coord_frame='mri', idx='gof',
show_all=True, ax=None, block=False,
show=True, verbose=None):
"""Plot dipole locations.
If mode is set to 'cone' or 'sphere', only the location of the first
    time point of each dipole is shown; otherwise, use the show_all parameter.
The option mode='orthoview' was added in version 0.14.
Parameters
----------
dipoles : list of instances of Dipole | Dipole
The dipoles to plot.
trans : dict
The mri to head trans.
subject : str
The subject name corresponding to FreeSurfer environment
variable SUBJECT.
subjects_dir : None | str
The path to the freesurfer subjects reconstructions.
It corresponds to Freesurfer environment variable SUBJECTS_DIR.
The default is None.
mode : str
Currently only ``'orthoview'`` is supported.
.. versionadded:: 0.14.0
coord_frame : str
Coordinate frame to use, 'head' or 'mri'. Defaults to 'mri'.
.. versionadded:: 0.14.0
idx : int | 'gof' | 'amplitude'
Index of the initially plotted dipole. Can also be 'gof' to plot the
dipole with highest goodness of fit value or 'amplitude' to plot the
dipole with the highest amplitude. The dipoles can also be browsed
through using up/down arrow keys or mouse scroll. Defaults to 'gof'.
Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
show_all : bool
        Whether to always plot all the dipoles. If True (default), the active
        dipole is plotted as a red dot and its location determines the shown
        MRI slices. The non-active dipoles are plotted as small blue dots.
If False, only the active dipole is plotted.
Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
ax : instance of matplotlib Axes3D | None
Axes to plot into. If None (default), axes will be created.
Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
block : bool
Whether to halt program execution until the figure is closed. Defaults
to False.
Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
show : bool
Show figure if True. Defaults to True.
Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
fig : instance of mlab.Figure or matplotlib Figure
The mayavi figure or matplotlib Figure.
Notes
-----
.. versionadded:: 0.9.0
"""
if mode == 'orthoview':
fig = _plot_dipole_mri_orthoview(
dipoles, trans=trans, subject=subject, subjects_dir=subjects_dir,
coord_frame=coord_frame, idx=idx, show_all=show_all,
ax=ax, block=block, show=show)
else:
raise ValueError('Mode must be "orthoview", got %s.' % (mode,))
return fig
def snapshot_brain_montage(fig, montage, hide_sensors=True):
"""Take a snapshot of a Mayavi Scene and project channels onto 2d coords.
Note that this will take the raw values for 3d coordinates of each channel,
    without applying any transforms. If brain images are flipped up/down when
    using `imshow`, check your matplotlib backend as this behavior changes.
Parameters
----------
fig : instance of Mayavi Scene
The figure on which you've plotted electrodes using `plot_trans`.
    montage : instance of `DigMontage` | instance of `Info` | dict
        The digital montage for the electrodes plotted in the scene. If
        `Info`, channel positions will be pulled from the `loc` field of
        `chs`. If dict, it must map channel names to xyz positions.
hide_sensors : bool
Whether to remove the spheres in the scene before taking a snapshot.
Returns
-------
xy : array, shape (n_channels, 2)
The 2d location of each channel on the image of the current scene view.
im : array, shape (m, n, 3)
The screenshot of the current scene view
"""
mlab = _import_mlab()
from ..channels import Montage, DigMontage
from .. import Info
if isinstance(montage, (Montage, DigMontage)):
chs = montage.dig_ch_pos
ch_names, xyz = zip(*[(ich, ixyz) for ich, ixyz in chs.items()])
elif isinstance(montage, Info):
xyz = [ich['loc'][:3] for ich in montage['chs']]
ch_names = [ich['ch_name'] for ich in montage['chs']]
elif isinstance(montage, dict):
if not all(len(ii) == 3 for ii in montage.values()):
raise ValueError('All electrode positions must be length 3')
ch_names, xyz = zip(*[(ich, ixyz) for ich, ixyz in montage.items()])
else:
raise ValueError('montage must be an instance of `DigMontage`, `Info`,'
' or `dict`')
xyz = np.vstack(xyz)
xy = _3d_to_2d(fig, xyz)
xy = dict(zip(ch_names, xy))
pts = fig.children[-1]
if hide_sensors is True:
pts.visible = False
with warnings.catch_warnings(record=True):
im = mlab.screenshot(fig)
pts.visible = True
return xy, im
def _3d_to_2d(fig, xyz):
"""Convert 3d points to a 2d perspective using a Mayavi Scene."""
from mayavi.core.scene import Scene
if not isinstance(fig, Scene):
raise TypeError('fig must be an instance of Scene, '
'found type %s' % type(fig))
xyz = np.column_stack([xyz, np.ones(xyz.shape[0])])
# Transform points into 'unnormalized' view coordinates
comb_trans_mat = _get_world_to_view_matrix(fig.scene)
view_coords = np.dot(comb_trans_mat, xyz.T).T
# Divide through by the fourth element for normalized view coords
norm_view_coords = view_coords / (view_coords[:, 3].reshape(-1, 1))
# Transform from normalized view coordinates to display coordinates.
view_to_disp_mat = _get_view_to_display_matrix(fig.scene)
xy = np.dot(view_to_disp_mat, norm_view_coords.T).T
# Pull the first two columns since they're meaningful for 2d plotting
xy = xy[:, :2]
return xy
def _get_world_to_view_matrix(scene):
"""Return the 4x4 matrix to transform xyz space to the current view.
This is a concatenation of the model view and perspective transforms.
"""
from mayavi.core.ui.mayavi_scene import MayaviScene
from tvtk.pyface.tvtk_scene import TVTKScene
if not isinstance(scene, (MayaviScene, TVTKScene)):
raise TypeError('scene must be an instance of TVTKScene/MayaviScene, '
'found type %s' % type(scene))
cam = scene.camera
# The VTK method needs the aspect ratio and near and far
# clipping planes in order to return the proper transform.
scene_size = tuple(scene.get_size())
clip_range = cam.clipping_range
aspect_ratio = float(scene_size[0]) / scene_size[1]
# Get the vtk matrix object using the aspect ratio we defined
vtk_comb_trans_mat = cam.get_composite_projection_transform_matrix(
aspect_ratio, clip_range[0], clip_range[1])
vtk_comb_trans_mat = vtk_comb_trans_mat.to_array()
return vtk_comb_trans_mat
def _get_view_to_display_matrix(scene):
"""Return the 4x4 matrix to convert view coordinates to display coordinates.
It's assumed that the view should take up the entire window and that the
origin of the window is in the upper left corner.
"""
from mayavi.core.ui.mayavi_scene import MayaviScene
from tvtk.pyface.tvtk_scene import TVTKScene
if not isinstance(scene, (MayaviScene, TVTKScene)):
raise TypeError('scene must be an instance of TVTKScene/MayaviScene, '
'found type %s' % type(scene))
# normalized view coordinates have the origin in the middle of the space
# so we need to scale by width and height of the display window and shift
# by half width and half height. The matrix accomplishes that.
x, y = tuple(scene.get_size())
view_to_disp_mat = np.array([[x / 2.0, 0., 0., x / 2.0],
[0., -y / 2.0, 0., y / 2.0],
[0., 0., 1., 0.],
[0., 0., 0., 1.]])
return view_to_disp_mat
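# Example (illustrative sketch with a hypothetical 800x600 scene): the matrix
# built above maps the normalized view corner (-1, 1) to pixel (0, 0) and
# (1, -1) to (800, 600), i.e. the origin lands in the upper left corner.
def _demo_view_to_display_matrix():
    """Sketch verifying the view-to-display mapping on paper values."""
    x, y = 800., 600.
    mat = np.array([[x / 2.0, 0., 0., x / 2.0],
                    [0., -y / 2.0, 0., y / 2.0],
                    [0., 0., 1., 0.],
                    [0., 0., 0., 1.]])
    assert np.allclose(np.dot(mat, [-1., 1., 0., 1.])[:2], [0., 0.])
    assert np.allclose(np.dot(mat, [1., -1., 0., 1.])[:2], [x, y])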
def _plot_dipole_mri_orthoview(dipole, trans, subject, subjects_dir=None,
coord_frame='head', idx='gof', show_all=True,
ax=None, block=False, show=True):
"""Plot dipoles on top of MRI slices in 3-D."""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from .. import Dipole
if not has_nibabel():
raise ImportError('This function requires nibabel.')
import nibabel as nib
from nibabel.processing import resample_from_to
if coord_frame not in ['head', 'mri']:
raise ValueError("coord_frame must be 'head' or 'mri'. "
"Got %s." % coord_frame)
if not isinstance(dipole, Dipole):
from ..dipole import _concatenate_dipoles
dipole = _concatenate_dipoles(dipole)
if idx == 'gof':
idx = np.argmax(dipole.gof)
elif idx == 'amplitude':
idx = np.argmax(np.abs(dipole.amplitude))
else:
idx = _ensure_int(idx, 'idx', 'an int or one of ["gof", "amplitude"]')
subjects_dir = get_subjects_dir(subjects_dir=subjects_dir,
raise_error=True)
t1_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
t1 = nib.load(t1_fname)
vox2ras = t1.header.get_vox2ras_tkr()
ras2vox = linalg.inv(vox2ras)
trans = _get_trans(trans, fro='head', to='mri')[0]
zooms = t1.header.get_zooms()
if coord_frame == 'head':
affine_to = trans['trans'].copy()
affine_to[:3, 3] *= 1000 # to mm
aff = t1.affine.copy()
aff[:3, :3] /= zooms
affine_to = np.dot(affine_to, aff)
t1 = resample_from_to(t1, ([int(t1.shape[i] * zooms[i]) for i
in range(3)], affine_to))
dipole_locs = apply_trans(ras2vox, dipole.pos * 1e3) * zooms
ori = dipole.ori
scatter_points = dipole.pos * 1e3
else:
scatter_points = apply_trans(trans['trans'], dipole.pos) * 1e3
ori = apply_trans(trans['trans'], dipole.ori, move=False)
dipole_locs = apply_trans(ras2vox, scatter_points)
data = t1.get_data()
dims = len(data) # Symmetric size assumed.
dd = dims / 2.
dd *= t1.header.get_zooms()[0]
if ax is None:
fig = plt.figure()
ax = Axes3D(fig)
elif not isinstance(ax, Axes3D):
raise ValueError('ax must be an instance of Axes3D. '
'Got %s.' % type(ax))
else:
fig = ax.get_figure()
gridx, gridy = np.meshgrid(np.linspace(-dd, dd, dims),
np.linspace(-dd, dd, dims))
_plot_dipole(ax, data, dipole_locs, idx, dipole, gridx, gridy, ori,
coord_frame, zooms, show_all, scatter_points)
params = {'ax': ax, 'data': data, 'idx': idx, 'dipole': dipole,
'dipole_locs': dipole_locs, 'gridx': gridx, 'gridy': gridy,
'ori': ori, 'coord_frame': coord_frame, 'zooms': zooms,
'show_all': show_all, 'scatter_points': scatter_points}
ax.view_init(elev=30, azim=-140)
callback_func = partial(_dipole_changed, params=params)
fig.canvas.mpl_connect('scroll_event', callback_func)
fig.canvas.mpl_connect('key_press_event', callback_func)
plt_show(show, block=block)
return fig
def _plot_dipole(ax, data, points, idx, dipole, gridx, gridy, ori, coord_frame,
zooms, show_all, scatter_points):
"""Plot dipoles."""
import matplotlib.pyplot as plt
point = points[idx]
xidx, yidx, zidx = np.round(point).astype(int)
xslice = data[xidx][::-1]
yslice = data[:, yidx][::-1].T
zslice = data[:, :, zidx][::-1].T[::-1]
if coord_frame == 'head':
zooms = (1., 1., 1.)
else:
point = points[idx] * zooms
xidx, yidx, zidx = np.round(point).astype(int)
xyz = scatter_points
ori = ori[idx]
if show_all:
colors = np.repeat('y', len(points))
colors[idx] = 'r'
size = np.repeat(5, len(points))
size[idx] = 20
visibles = range(len(points))
else:
colors = 'r'
size = 20
visibles = idx
offset = np.min(gridx)
ax.scatter(xs=xyz[visibles, 0], ys=xyz[visibles, 1],
zs=xyz[visibles, 2], zorder=2, s=size, facecolor=colors)
xx = np.linspace(offset, xyz[idx, 0], xidx)
yy = np.linspace(offset, xyz[idx, 1], yidx)
zz = np.linspace(offset, xyz[idx, 2], zidx)
ax.plot(xx, np.repeat(xyz[idx, 1], len(xx)), zs=xyz[idx, 2], zorder=1,
linestyle='-', color='r')
ax.plot(np.repeat(xyz[idx, 0], len(yy)), yy, zs=xyz[idx, 2], zorder=1,
linestyle='-', color='r')
ax.plot(np.repeat(xyz[idx, 0], len(zz)),
np.repeat(xyz[idx, 1], len(zz)), zs=zz, zorder=1,
linestyle='-', color='r')
ax.quiver(xyz[idx, 0], xyz[idx, 1], xyz[idx, 2], ori[0], ori[1],
ori[2], length=50, pivot='tail', color='r')
dims = np.array([(len(data) / -2.), (len(data) / 2.)])
ax.set_xlim(-1 * dims * zooms[:2]) # Set axis lims to RAS coordinates.
ax.set_ylim(-1 * dims * zooms[:2])
ax.set_zlim(dims * zooms[:2])
# Plot slices.
ax.contourf(xslice, gridx, gridy, offset=offset, zdir='x',
cmap='gray', zorder=0, alpha=.5)
ax.contourf(gridx, gridy, yslice, offset=offset, zdir='z',
cmap='gray', zorder=0, alpha=.5)
ax.contourf(gridx, zslice, gridy, offset=offset,
zdir='y', cmap='gray', zorder=0, alpha=.5)
plt.suptitle('Dipole %s, Time: %.3fs, GOF: %.1f, Amplitude: %.1fnAm\n' % (
idx, dipole.times[idx], dipole.gof[idx], dipole.amplitude[idx] * 1e9) +
'(%0.1f, %0.1f, %0.1f) mm' % tuple(xyz[idx]))
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.draw()
def _dipole_changed(event, params):
"""Handle dipole plotter scroll/key event."""
if event.key is not None:
if event.key == 'up':
params['idx'] += 1
elif event.key == 'down':
params['idx'] -= 1
else: # some other key
return
elif event.step > 0: # scroll event
params['idx'] += 1
else:
params['idx'] -= 1
params['idx'] = min(max(0, params['idx']), len(params['dipole'].pos) - 1)
params['ax'].clear()
_plot_dipole(params['ax'], params['data'], params['dipole_locs'],
params['idx'], params['dipole'], params['gridx'],
params['gridy'], params['ori'], params['coord_frame'],
params['zooms'], params['show_all'], params['scatter_points'])
| jaeilepp/mne-python | mne/viz/_3d.py | Python | bsd-3-clause | 81,443 | ["Mayavi", "VTK"] | e5e79e3e6d419f64fe8dd712c83895d90dc98c560a9a69dc58fc9c54fb667841 |
"""cpassdb - Storage
This module contains all code regarding reading from and writing to disk, as
well as directory structuring.
"""
__author__ = "Brian Wiborg <baccenfutter@c-base.org>"
__license__ = "GNU/GPLv2"
import os
import sys
import json
import glob
import arrow
import commands
from .config import STORAGE_ROOT, OBJECTS_DIR, TIMESTAMP_FORMAT, GNUPG_PATH, ADMIN_GROUP
def ensure_directory_exist(path, mode=0775):
"""Recursively create a directory if it doesn't yet exist.
:param path: str - Absolute path of directory to ensure existence.
:raise: IOError - If path is not an absolute path.
"""
if not path.startswith(os.path.sep):
raise IOError("Not an absolute path: {}".format(path))
cursor = '/'
for parent in path.split(os.path.sep):
cursor = os.path.join(cursor, parent)
if os.path.exists(cursor):
if not os.path.isdir(cursor):
raise IOError("Not a directory: {}".format(cursor))
else:
os.mkdir(cursor, mode)
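# Example (illustrative only; the path below is hypothetical): creating the
# same nested directory twice is a no-op, while relative paths are rejected.
def _demo_ensure_directory_exist():
    """Sketch of ensure_directory_exist usage."""
    ensure_directory_exist('/tmp/cpassdb-demo/objects/web')
    ensure_directory_exist('/tmp/cpassdb-demo/objects/web')  # idempotent
    try:
        ensure_directory_exist('relative/path')
    except IOError:
        pass  # relative paths raise IOError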
def recursively_remove_empty_dirs(path):
"""Recursively delete all empty directories.
:param path: str - Root directory for maintenance task.
"""
for root, dirs, files in os.walk(path):
if not dirs and not files:
os.rmdir(root)
def new_secret(name=None, secret=None, ttl=30, recipients=None):
"""Get user input for a new secret object.
:param name: str - Name of the secret (incl. category).
:param secret: struct - Secret data structure (usually a dict).
:param ttl: int - Time-to-live in days past today.
:param recipients: list - List of the recipients.
    :return: dict - A secret-object data structure in the format of
        {
            'name': <secret-object-name>,
            'secret': <secret-object-data-struct>,
            'ttl': <ttl>,
            'recipients': [<recipient>, ...]
        }
"""
if name is None:
name = raw_input("Name: ")
if secret is None:
secret = {}
if not secret:
while True:
field = raw_input("Field: ")
if field:
value = raw_input("Value: ")
if value:
secret[field] = value
continue
break
    if not recipients:
        recipients = []
        while True:
            recipient = raw_input("Recipient: ")
            if recipient:
                recipients.append(recipient)
                continue
            break
if not name:
raise RuntimeError("No name given!")
    if not isinstance(ttl, int):
        raise RuntimeError("Invalid TTL: {}".format(ttl))
if not secret:
raise RuntimeError("Secret is empty!")
if not recipients:
raise RuntimeError("No recipient given!")
return {
'name': name,
'secret': secret,
'ttl': ttl,
'recipients': recipients,
}
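# Example (illustrative values only): supplying every argument up front makes
# new_secret fully non-interactive, skipping all raw_input() prompts.
def _demo_new_secret():
    """Sketch of non-interactive new_secret usage."""
    obj = new_secret(
        name='web/example.org',
        secret={'user': 'admin', 'pass': 'hunter2'},
        ttl=30,
        recipients=['alice', 'bob'],
    )
    assert obj['name'] == 'web/example.org'
    assert obj['recipients'] == ['alice', 'bob']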
class StorageRoot(object):
"""Interface to the cpassdb storage root directory.
The storage root contains an 'objects' subdirectory for storing the actual
    secret objects in. There is also an access.log file that serves the
    purpose its name suggests: it holds all access logs.
"""
ACCESS_LOG_FILENAME = 'access.log'
def __init__(self):
self.root = STORAGE_ROOT
self.objects_dir = os.path.join(self.root, OBJECTS_DIR)
self.access_log = None
self.ensure_directory_structure()
def __str__(self):
return self.root
def __repr__(self):
return "<StorageRoot(path={})>".format(self.root)
@property
def all_secrets(self):
"""Property access to all secret-objects in the store.
:return: list - List of secret objects.
"""
output = []
for root, _, files in os.walk(self.objects_dir):
for basename in files:
abs_path = os.path.join(root, basename)
if abs_path.endswith(MetaFile.FILE_SUFFIX):
output.append(
os.path.sep.join(
abs_path.split(os.path.sep)[len(self.objects_dir.split(os.path.sep)):]
).split('.')[0]
)
return output
def get_secrets_of_group(self, group):
"""Get a list of secrets that are encrypted against a specific group.
This method is needed for cyclic re-encryption of secrets upon group
membership changes.
:param group: str - Name of group for which to filter.
:return: list - List of secret-objects.
"""
output = []
for secret in self.all_secrets:
meta_file = MetaFile(self, secret)
if group in meta_file.read()['recipients']:
output.append(secret)
return output
def ensure_directory_structure(self):
"""Ensure that the required subdirectory structure."""
ensure_directory_exist(self.objects_dir)
access_log = os.path.join(self.root, self.ACCESS_LOG_FILENAME)
if not os.path.exists(access_log):
open(access_log, 'w').write("")
def get_object_files(self, name):
"""Retrieve interface to object files of any particular secret object.
:param name: str - Name of the secret object.
:return: tuple - (ObjectFile(), MetaFile())
"""
return (
ObjectFile(self, name),
MetaFile(self, name),
)
def log_access(self, who, what):
"""Maintain an access log.
:param who: str - Key-ID of the signer of the request.
        :param what: str | list - Action performed; lists are joined with
            commas.
        """
if self.access_log is None:
self.access_log = open(os.path.join(self.root, self.ACCESS_LOG_FILENAME), 'a')
# Turn lists into comma separated values.
if isinstance(what, list):
what = ','.join(what)
timestamp = arrow.now().datetime.isoformat()
message = "{} - {} => {}\n".format(timestamp, who, what.strip())
sys.stderr.write(message)
sys.stderr.flush()
self.access_log.write(message)
self.access_log.flush()
def set_secret(self, signer, name, secret, ttl, *recipients):
"""Persist a secret object to disk.
:param signer: str - Key-ID of the signer of the last request.
        :param name: str - The name of the secret object (incl. its category).
        :param secret: dict - The secret object data structure.
:param ttl: int - Number of days of validity past today.
:param recipients: tuple - List of all recipients towards whom to encrypt the data.
"""
object_file, meta_file = self.get_object_files(name)
self.log_access(signer, "SET: {}".format(name))
object_file.write(secret, *recipients)
meta_file.write(signer, ttl, *recipients)
def get_secret(self, signer, name):
"""Retrieve the ASCII armor and metadata of the encrypted secret object.
        :param signer: str - Key-ID of the signer of the request.
:param name: str - Name of the secret object.
:return: dict - {'armor': ..., 'metadata': ...}
"""
object_file, meta_file = self.get_object_files(name)
if not object_file.exists():
raise IOError("Secret doesn't exist: {}".format(name))
self.log_access(signer, "GET: {}".format(name))
return {
"armor": object_file.read(),
"metadata": meta_file.read(),
}
def del_secret(self, signer, name):
"""Delete a secret from disk.
:param signer: str - Key-ID of the signer of the request.
:param name: str - Name of the secret object.
"""
object_file, meta_file = self.get_object_files(name)
if not object_file.exists():
raise IOError("Secret doesn't exist: {}".format(name))
self.log_access(signer, "DEL: {}".format(name))
if object_file.exists():
object_file.delete()
if meta_file.exists():
meta_file.delete()
def list_secrets(self, path):
"""List all secret-objects in a given subdirectory (aka. category).
:param path: str - Path to directory to list.
:return: list - List of secret-objects.
"""
absolute_path = os.path.join(self.root, OBJECTS_DIR, path)
if not absolute_path.endswith(os.path.sep):
absolute_path += os.path.sep
if not os.path.exists(absolute_path) or not os.path.isdir(absolute_path):
raise IOError("Not a directory.")
file_list = glob.glob(absolute_path + '*.' + ObjectFile.FILE_SUFFIX)
secret_objects = [
f.split(os.path.sep)[-1].replace('.{}'.format(ObjectFile.FILE_SUFFIX), '')
for f in file_list
]
dirs = [d for d in os.listdir(absolute_path) if os.path.isdir(os.path.join(absolute_path, d))]
return {
'dirs': dirs,
'secrets': secret_objects,
}
class ObjectFile(object):
FILE_SUFFIX = 'gpg'
def __init__(self, root, name):
"""Interface to a secret object file.
:param root: obj - Instance of StorageRoot
:param name: str - Name of the secret object
"""
self.root = root
self.name = name
@property
def absolute_path(self):
return os.path.join(self.root.objects_dir, self.name + '.' + self.FILE_SUFFIX)
def exists(self):
"""Check if the object file exists.
:return: bool - True if exists.
"""
return os.path.exists(self.absolute_path)
def delete(self):
"""Delete the object file from disk."""
os.remove(self.absolute_path)
def write(self, secret, *recipients):
"""Persist this object file to disk.
:param secret: dict - Secret data structure.
:param *recipients: tuple - List of all recipients towards whom to encrypt the data.
"""
parent_dir, basename = os.path.split(self.absolute_path)
ensure_directory_exist(parent_dir)
# Assemble recipient args. This builds a string in the format
# of '-r <recipient> [-r <recipient> ...]'.
recipient_args = ' '.join(['-r {}'.format(r) for r in recipients])
        # Assemble the command-line. Note: the secret JSON is interpolated
        # into a single-quoted shell string, so values containing single
        # quotes would break the quoting below.
cmd = "echo '{secret}' |"
cmd += " gpg --homedir '{gpg_home}'"
cmd += " --batch --yes --armor"
cmd += " --trust-model always --encrypt"
cmd += " -r {admin_group} {recipient_args}"
cmd += " > {object_file}"
cmd = cmd.format(
secret=json.dumps(secret),
gpg_home=GNUPG_PATH,
admin_group=ADMIN_GROUP,
recipient_args=recipient_args,
object_file=self.absolute_path,
)
# Execute the command.
status, output = commands.getstatusoutput(cmd)
print output
if status:
raise IOError("Not written!")
def read(self):
"""Read and return the encrypted object file.
:return: str - ASCII armor contents of object file.
"""
fp = open(self.absolute_path, 'r')
lines = fp.readlines()
fp.close()
return "".join(lines)
class MetaFile(ObjectFile):
"""Interface to metadata file.
This object derives from ObjectFile overloading the read and write methods.
"""
FILE_SUFFIX = 'meta'
def write(self, signer, ttl, *recipients):
metadata = {
'signer': signer,
'timestamp': arrow.now().format(TIMESTAMP_FORMAT),
'ttl': ttl,
'recipients': recipients,
}
fp = open(self.absolute_path, 'w')
fp.write(json.dumps(metadata, indent=4))
fp.close()
def read(self):
"""Read and return the data structure of this metadata file.
:return: dict - Metadata data structure.
"""
return json.loads(ObjectFile.read(self))
|
baccenfutter/cpassdb
|
cpassdb/storage.py
|
Python
|
gpl-2.0
| 12,362
|
[
"Brian"
] |
9990971b2df4a940891574af736e24718a57518ad5ba6de7ec15e65494310e42
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Control flow graph (CFG) structure for Python AST representation.
The CFG is a digraph with edges representing valid control flow. Each
node is associated with exactly one AST node, but not all AST nodes may have
a corresponding CFG counterpart.
Once built, the CFG itself is immutable, but the values it holds need not be;
they are usually annotated with information extracted by walking the graph.
Tip: Use `Graph.as_dot` to visualize the CFG using any DOT viewer.
Note: the CFG tries to include all code paths that MAY be taken, with a single
notable exception:
* function calls do not generate edges corresponding to exceptions they may
raise (i.e. a function call in the middle of a block does not return or jump
to any except or finally block)
TODO(mdan): Consider adding the edges above. They'd only add ~O(n) edges.
TODO(mdan): Alternatively, consider adding an edge from try to all its excepts.
"""
# TODO(mdan): The notion of 'statements' below is inaccurate.
# They should rather be called 'block statements', because they include
# statements that may have a body, e.g. if and while.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import weakref
from enum import Enum
import gast
import six
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
class Node(object):
"""A node in the CFG.
Although new instances of this class are mutable, the objects that a user
finds in the CFG are typically not.
  Each CFG node wraps a single AST node and maintains pointers that allow
  efficient walking in both forward and reverse order. The following property
  holds for all nodes: "child in node.next" iff "node in child.prev".
Attributes:
next: FrozenSet[Node, ...], the nodes that follow this node, in control
flow order
prev: FrozenSet[Node, ...], the nodes that precede this node, in reverse
control flow order
ast_node: ast.AST, the AST node corresponding to this CFG node
"""
def __init__(self, next_, prev, ast_node):
self.next = next_
self.prev = prev
self.ast_node = ast_node
def freeze(self):
self.next = frozenset(self.next)
# Assumption: All CFG nodes have identical life spans, because the graph
# owns them. Nodes should never be used outside the context of an existing
# graph.
self.prev = weakref.WeakSet(self.prev)
def __repr__(self):
if isinstance(self.ast_node, gast.FunctionDef):
return 'def %s' % self.ast_node.name
elif isinstance(self.ast_node, gast.ClassDef):
return 'class %s' % self.ast_node.name
elif isinstance(self.ast_node, gast.withitem):
return parser.unparse(
self.ast_node.context_expr, include_encoding_marker=False).strip()
return parser.unparse(self.ast_node, include_encoding_marker=False).strip()
class Graph(
collections.namedtuple(
'Graph',
['entry', 'exit', 'error', 'index', 'stmt_prev', 'stmt_next'])):
"""A Control Flow Graph.
The CFG maintains an index to allow looking up a CFG node by the AST node to
which it is associated. The index can also be enumerated in top-down, depth
first order.
Walking the graph in forward or reverse order is supported by double
parent-child links.
Note: the error nodes are not wired to their corresponding finally guards,
because these are shared, and wiring them would create a reverse path from
normal control flow into the error nodes, which we want to avoid.
The graph also maintains edges corresponding to higher level statements
  like for-else loops. A node is considered a successor of a statement if there
is an edge from a node that is lexically a child of that statement to a node
that is not. Statement predecessors are analogously defined.
Attributes:
entry: Node, the entry node
exit: FrozenSet[Node, ...], the exit nodes
error: FrozenSet[Node, ...], nodes that exit due to an explicitly raised
      error (errors propagated from function calls are not accounted for)
index: Dict[ast.Node, Node], mapping AST nodes to the respective CFG
node
stmt_prev: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST
nodes to their predecessor CFG nodes
stmt_next: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST
nodes to their successor CFG nodes
"""
def __repr__(self):
return self.as_dot()
def as_dot(self):
"""Print CFG in DOT format."""
result = 'digraph CFG {\n'
for node in self.index.values():
result += ' %s [label="%s"];\n' % (id(node), node)
for node in self.index.values():
for next_ in node.next:
result += ' %s -> %s;\n' % (id(node), id(next_))
result += '}'
return result
class _WalkMode(Enum):
FORWARD = 1
REVERSE = 2
# TODO(mdan): Rename to DataFlowAnalyzer.
# TODO(mdan): Consider specializations that use gen/kill/transfer abstractions.
class GraphVisitor(object):
"""Base class for a CFG visitors.
This implementation is not thread safe.
The visitor has some facilities to simplify dataflow analyses. In particular,
it allows revisiting the nodes at the decision of the subclass. This can be
used to visit the graph until the state reaches a fixed point.
For more details on dataflow analysis, see
https://www.seas.harvard.edu/courses/cs252/2011sp/slides/Lec02-Dataflow.pdf
Note: the literature generally suggests visiting successor nodes only when the
state of the current node changed, regardless of whether that successor has
ever been visited. This implementation visits every successor at least once.
Attributes:
graph: Graph
in_: Dict[Node, Any], stores node-keyed state during a visit
out: Dict[Node, Any], stores node-keyed state during a visit
"""
def __init__(self, graph):
self.graph = graph
self.reset()
def init_state(self, node):
"""State initialization function. Optional to overload.
An in/out state slot will be created for each node in the graph. Subclasses
must overload this to control what that is initialized to.
Args:
node: Node
"""
raise NotImplementedError('Subclasses must implement this.')
# TODO(mdan): Rename to flow?
def visit_node(self, node):
"""Visitor function.
Args:
node: Node
Returns:
bool, whether the node should be revisited; subclasses can visit every
reachable node exactly once by always returning False
"""
raise NotImplementedError('Subclasses must implement this.')
def reset(self):
self.in_ = {
node: self.init_state(node) for node in self.graph.index.values()
}
self.out = {
node: self.init_state(node) for node in self.graph.index.values()
}
def can_ignore(self, node):
"""Returns True if the node can safely be assumed not to touch variables."""
ast_node = node.ast_node
if anno.hasanno(ast_node, anno.Basic.SKIP_PROCESSING):
return True
if six.PY2:
if (isinstance(ast_node, gast.Name) and
ast_node.id in ('None', 'True', 'False')):
return True
return isinstance(ast_node,
(gast.Break, gast.Continue, gast.Raise, gast.Pass))
def _visit_internal(self, mode):
"""Visits the CFG, breadth-first."""
assert mode in (_WalkMode.FORWARD, _WalkMode.REVERSE)
if mode == _WalkMode.FORWARD:
open_ = [self.graph.entry]
elif mode == _WalkMode.REVERSE:
open_ = list(self.graph.exit)
closed = set()
while open_:
node = open_.pop(0)
closed.add(node)
should_revisit = self.visit_node(node)
if mode == _WalkMode.FORWARD:
children = node.next
elif mode == _WalkMode.REVERSE:
children = node.prev
for next_ in children:
if should_revisit or next_ not in closed:
open_.append(next_)
def visit_forward(self):
self._visit_internal(_WalkMode.FORWARD)
def visit_reverse(self):
self._visit_internal(_WalkMode.REVERSE)
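# Hypothetical example (not part of the original module): a minimal
# GraphVisitor subclass that marks every node reachable from the entry.
# Because visit_node returns False, no node is ever revisited and the walk
# terminates once all reachable nodes have been seen.
class _ReachableMarker(GraphVisitor):

  def init_state(self, node):
    # Every node starts out unmarked.
    return False

  def visit_node(self, node):
    # Mark the node; returning False means "do not revisit".
    self.out[node] = True
    return False

# Usage:
#   v = _ReachableMarker(graph)
#   v.visit_forward()
#   reachable = [n for n, seen in v.out.items() if seen]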
class GraphBuilder(object):
"""Builder that constructs a CFG from a given AST.
This GraphBuilder facilitates constructing the DAG that forms the CFG when
nodes
are supplied in lexical order (i.e., top-down, depth first). Under these
conditions, it supports building patterns found in typical structured
programs.
This builder ignores the flow generated by exceptions, which are assumed to
always be catastrophic and present purely for diagnostic purposes (e.g. to
  print debug information). Statements like raise and try/except sections are
allowed and will generate control flow edges, but ordinary statements are
assumed not to raise exceptions.
Finally sections are also correctly interleaved between break/continue/return
nodes and their subsequent statements.
Important concepts:
* nodes - nodes refer to CFG nodes; AST nodes are qualified explicitly
* leaf set - since the graph is constructed gradually, a leaf set maintains
the CFG nodes that will precede the node that the builder expects to
receive next; when an ordinary node is added, it is connected to the
existing leaves and it in turn becomes the new leaf
* jump nodes - nodes that should generate edges other than what
ordinary nodes would; these correspond to break, continue and return
statements
* sections - logical delimiters for subgraphs that require special
      edges; there are various types of sections, each admitting various
types of jump nodes; sections are identified by their corresponding AST
node
"""
# TODO(mdan): Perhaps detail this in a markdown doc.
# TODO(mdan): Add exception support.
def __init__(self, parent_ast_node):
self.reset()
self.parent = parent_ast_node
def reset(self):
"""Resets the state of this factory."""
self.head = None
self.errors = set()
self.node_index = {}
# TODO(mdan): Too many primitives. Use classes.
self.leaves = set()
# Note: This mechanism requires that nodes are added in lexical order (top
# to bottom, depth first).
self.active_stmts = set()
    self.owners = {}  # type: Dict[Node, FrozenSet[Hashable]]
    self.forward_edges = set()  # type: Set[Tuple[Node, Node]]  # (from, to)
self.finally_sections = {}
# Dict values represent (entry, exits)
self.finally_section_subgraphs = {
} # type: Dict[ast.AST, Tuple[Node, Set[Node]]]
# Whether the guard section can be reached from the statement that precedes
# it.
self.finally_section_has_direct_flow = {}
# Finally sections that await their first node.
self.pending_finally_sections = set()
# Exit jumps keyed by the section they affect.
self.exits = {}
# The entry of loop sections, keyed by the section.
self.section_entry = {}
# Continue jumps keyed by the section they affect.
self.continues = {}
# Raise jumps keyed by the except section guarding them.
self.raises = {}
# The entry of conditional sections, keyed by the section.
self.cond_entry = {}
# Lists of leaf nodes corresponding to each branch in the section.
self.cond_leaves = {}
def _connect_nodes(self, first, second):
"""Connects nodes to signify that control flows from first to second.
Args:
first: Union[Set[Node, ...], Node]
second: Node
"""
if isinstance(first, Node):
first.next.add(second)
second.prev.add(first)
self.forward_edges.add((first, second))
else:
for node in first:
self._connect_nodes(node, second)
def _add_new_node(self, ast_node):
"""Grows the graph by adding a CFG node following the current leaves."""
    if ast_node in self.node_index:
raise ValueError('%s added twice' % ast_node)
# Assumption: All CFG nodes have identical life spans, because the graph
# owns them. Nodes should never be used outside the context of an existing
# graph.
node = Node(next_=set(), prev=weakref.WeakSet(), ast_node=ast_node)
self.node_index[ast_node] = node
self.owners[node] = frozenset(self.active_stmts)
if self.head is None:
self.head = node
for leaf in self.leaves:
self._connect_nodes(leaf, node)
# If any finally section awaits its first node, populate it.
for section_id in self.pending_finally_sections:
self.finally_section_subgraphs[section_id][0] = node
self.pending_finally_sections = set()
return node
def begin_statement(self, stmt):
"""Marks the beginning of a statement.
Args:
stmt: Hashable, a key by which the statement can be identified in
the CFG's stmt_prev and stmt_next attributes
"""
self.active_stmts.add(stmt)
def end_statement(self, stmt):
"""Marks the end of a statement.
Args:
stmt: Hashable, a key by which the statement can be identified in
the CFG's stmt_prev and stmt_next attributes; must match a key
previously passed to begin_statement.
"""
self.active_stmts.remove(stmt)
def add_ordinary_node(self, ast_node):
"""Grows the graph by adding an ordinary CFG node.
Ordinary nodes are followed by the next node, in lexical order, that is,
they become the new leaf set.
Args:
ast_node: ast.AST
Returns:
Node
"""
node = self._add_new_node(ast_node)
self.leaves = set((node,))
return node
def _add_jump_node(self, ast_node, guards):
"""Grows the graph by adding a jump node.
Jump nodes are added to the current leaf set, and the leaf set becomes
empty. If the jump node is the last in a cond section, then it may be added
back to the leaf set by a separate mechanism.
Args:
ast_node: ast.AST
guards: Tuple[ast.AST, ...], the finally sections active for this node
Returns:
Node
"""
node = self._add_new_node(ast_node)
self.leaves = set()
# The guards themselves may not yet be complete, and will be wired later.
self.finally_sections[node] = guards
return node
def _connect_jump_to_finally_sections(self, node):
"""Connects a jump node to the finally sections protecting it."""
cursor = set((node,))
if node not in self.finally_sections:
return cursor
for guard_section_id in self.finally_sections[node]:
guard_begin, guard_ends = self.finally_section_subgraphs[guard_section_id]
self._connect_nodes(cursor, guard_begin)
cursor = guard_ends
del self.finally_sections[node]
# TODO(mdan): Should garbage-collect finally_section_subgraphs.
return cursor
def add_exit_node(self, ast_node, section_id, guards):
"""Grows the graph by adding an exit node.
This node becomes an exit for the current section.
Args:
ast_node: ast.AST
section_id: Hashable, the node for which ast_node should be considered
to be an exit node
guards: Tuple[ast.AST, ...], the finally sections that guard ast_node
Returns:
Node
"""
node = self._add_jump_node(ast_node, guards)
self.exits[section_id].add(node)
return node
def add_continue_node(self, ast_node, section_id, guards):
"""Grows the graph by adding a reentry node.
This node causes control flow to go back to the loop section's entry.
Args:
ast_node: ast.AST
section_id: Hashable, the node for which ast_node should be considered
to be an exit node
guards: Tuple[ast.AST, ...], the finally sections that guard ast_node
"""
node = self._add_jump_node(ast_node, guards)
self.continues[section_id].add(node)
def connect_raise_node(self, node, except_guards):
"""Adds extra connection between a raise node and containing except guards.
The node is a graph node, not an ast node.
Args:
node: Node
except_guards: Tuple[ast.AST, ...], the except sections that guard node
"""
for guard in except_guards:
if guard in self.raises:
self.raises[guard].append(node)
else:
self.raises[guard] = [node]
def enter_section(self, section_id):
"""Enters a regular section.
Regular sections admit exit jumps, which end the section.
Args:
section_id: Hashable, the same node that will be used in calls to the
ast_node arg passed to add_exit_node
"""
assert section_id not in self.exits
self.exits[section_id] = set()
def exit_section(self, section_id):
"""Exits a regular section."""
# Exits are jump nodes, which may be protected.
for exit_ in self.exits[section_id]:
self.leaves |= self._connect_jump_to_finally_sections(exit_)
del self.exits[section_id]
def enter_loop_section(self, section_id, entry_node):
"""Enters a loop section.
Loop sections define an entry node. The end of the section always flows back
to the entry node. These admit continue jump nodes which also flow to the
entry node.
Args:
section_id: Hashable, the same node that will be used in calls to the
ast_node arg passed to add_continue_node
entry_node: ast.AST, the entry node into the loop (e.g. the test node
for while loops)
"""
assert section_id not in self.section_entry
assert section_id not in self.continues
self.continues[section_id] = set()
node = self.add_ordinary_node(entry_node)
self.section_entry[section_id] = node
def exit_loop_section(self, section_id):
"""Exits a loop section."""
self._connect_nodes(self.leaves, self.section_entry[section_id])
# continues are jump nodes, which may be protected.
for reentry in self.continues[section_id]:
guard_ends = self._connect_jump_to_finally_sections(reentry)
self._connect_nodes(guard_ends, self.section_entry[section_id])
# Loop nodes always loop back.
self.leaves = set((self.section_entry[section_id],))
del self.continues[section_id]
del self.section_entry[section_id]
def enter_cond_section(self, section_id):
"""Enters a conditional section.
Conditional sections define an entry node, and one or more branches.
Args:
section_id: Hashable, the same node that will be used in calls to the
section_id arg passed to new_cond_branch
"""
assert section_id not in self.cond_entry
assert section_id not in self.cond_leaves
self.cond_leaves[section_id] = []
def new_cond_branch(self, section_id):
"""Begins a new branch in a cond section."""
assert section_id in self.cond_leaves
if section_id in self.cond_entry:
# Subsequent splits move back to the split point, and memorize the
# current leaves.
self.cond_leaves[section_id].append(self.leaves)
self.leaves = self.cond_entry[section_id]
else:
# If this is the first time we split a section, just remember the split
# point.
self.cond_entry[section_id] = self.leaves
def exit_cond_section(self, section_id):
"""Exits a conditional section."""
for split in self.cond_leaves[section_id]:
self.leaves |= split
del self.cond_entry[section_id]
del self.cond_leaves[section_id]
def enter_except_section(self, section_id):
"""Enters an except section."""
if section_id in self.raises:
self.leaves.update(self.raises[section_id])
def enter_finally_section(self, section_id):
"""Enters a finally section."""
# TODO(mdan): This, not the caller, should track the active sections.
self.finally_section_subgraphs[section_id] = [None, None]
if self.leaves:
self.finally_section_has_direct_flow[section_id] = True
else:
self.finally_section_has_direct_flow[section_id] = False
self.pending_finally_sections.add(section_id)
def exit_finally_section(self, section_id):
"""Exits a finally section."""
assert section_id not in self.pending_finally_sections, 'Empty finally?'
self.finally_section_subgraphs[section_id][1] = self.leaves
# If the guard can only be reached by a jump, then it will not flow
# into the statement that follows it.
if not self.finally_section_has_direct_flow[section_id]:
self.leaves = set()
del self.finally_section_has_direct_flow[section_id]
def build(self):
"""Returns the CFG accumulated so far and resets the builder.
Returns:
Graph
"""
# Freeze the nodes.
for node in self.node_index.values():
node.freeze()
# Build the statement edges.
stmt_next = {}
stmt_prev = {}
for node in self.node_index.values():
for stmt in self.owners[node]:
if stmt not in stmt_prev:
stmt_prev[stmt] = set()
if stmt not in stmt_next:
stmt_next[stmt] = set()
for first, second in self.forward_edges:
stmts_exited = self.owners[first] - self.owners[second]
for stmt in stmts_exited:
stmt_next[stmt].add(second)
stmts_entered = self.owners[second] - self.owners[first]
for stmt in stmts_entered:
stmt_prev[stmt].add(first)
for stmt in stmt_next:
stmt_next[stmt] = frozenset(stmt_next[stmt])
for stmt in stmt_prev:
stmt_prev[stmt] = frozenset(stmt_prev[stmt])
# Construct the final graph object.
result = Graph(
entry=self.head,
exit=self.leaves,
error=self.errors,
index=self.node_index,
stmt_prev=stmt_prev,
stmt_next=stmt_next)
# Reset the state.
self.reset()
return result
class AstToCfg(gast.NodeVisitor):
"""Converts an AST to CFGs.
A separate CFG will be constructed for each function.
"""
def __init__(self):
super(AstToCfg, self).__init__()
self.builder_stack = []
self.builder = None
self.cfgs = {}
self.lexical_scopes = []
def _enter_lexical_scope(self, node):
self.lexical_scopes.append(node)
def _exit_lexical_scope(self, node):
leaving_node = self.lexical_scopes.pop()
assert node == leaving_node
def _get_enclosing_finally_scopes(self, stop_at):
included = []
for node in reversed(self.lexical_scopes):
if isinstance(node, gast.Try) and node.finalbody:
included.append(node)
if isinstance(node, stop_at):
return node, included
return None, included
def _get_enclosing_except_scopes(self, stop_at):
included = []
for node in reversed(self.lexical_scopes):
if isinstance(node, gast.Try) and node.handlers:
included.extend(node.handlers)
if isinstance(node, stop_at):
break
return included
def _process_basic_statement(self, node):
self.generic_visit(node)
self.builder.add_ordinary_node(node)
def _process_exit_statement(
self, node, exits_nodes_of_type, may_exit_via_except=False):
self.generic_visit(node)
# Note: this is safe because we process functions separately.
try_node, guards = self._get_enclosing_finally_scopes(exits_nodes_of_type)
assert try_node is not None, '{} that is not enclosed by any of {}'.format(
node, exits_nodes_of_type)
node = self.builder.add_exit_node(node, try_node, guards)
if may_exit_via_except:
except_guards = self._get_enclosing_except_scopes(exits_nodes_of_type)
self.builder.connect_raise_node(node, except_guards)
def _process_continue_statement(self, node, *loops_to_nodes_of_type):
# Note: this is safe because we process functions separately.
try_node, guards = self._get_enclosing_finally_scopes(
tuple(loops_to_nodes_of_type))
if try_node is None:
raise ValueError('%s that is not enclosed by any of %s' %
(node, loops_to_nodes_of_type))
self.builder.add_continue_node(node, try_node, guards)
def visit_ClassDef(self, node):
# We also keep the ClassDef node in the CFG, since it technically is a
# statement.
# For example, this is legal and allows executing user code:
#
# class Foo(bar()):
# pass
#
# It also has a scope:
#
# class Bar(object):
# a = 1
if self.builder is None:
self.generic_visit(node)
return
self.builder.add_ordinary_node(node)
self.builder_stack.append(self.builder)
self.builder = GraphBuilder(node)
self._enter_lexical_scope(node)
self._process_basic_statement(node)
self._exit_lexical_scope(node)
# TODO(mdan): Track the CFG local to the class definition as well?
self.builder = self.builder_stack.pop()
def _process_function_def(self, node, is_lambda):
# The function body is stored in a separate graph, because function
# definitions have effects very different from function calls.
if self.builder is not None:
self.builder.add_ordinary_node(node)
self.builder_stack.append(self.builder)
self.builder = GraphBuilder(node)
self._enter_lexical_scope(node)
self.builder.enter_section(node)
self._process_basic_statement(node.args)
if is_lambda:
self._process_exit_statement(node.body, (gast.Lambda,))
else:
for stmt in node.body:
self.visit(stmt)
self.builder.exit_section(node)
self._exit_lexical_scope(node)
self.cfgs[node] = self.builder.build()
self.builder = self.builder_stack.pop()
def visit_FunctionDef(self, node):
self._process_function_def(node, is_lambda=False)
def visit_Lambda(self, node):
self._process_function_def(node, is_lambda=True)
def visit_Return(self, node):
self._process_exit_statement(node, (gast.FunctionDef,))
def visit_Import(self, node):
self._process_basic_statement(node)
def visit_ImportFrom(self, node):
self._process_basic_statement(node)
def visit_Expr(self, node):
self._process_basic_statement(node)
def visit_Assign(self, node):
self._process_basic_statement(node)
def visit_AnnAssign(self, node):
self._process_basic_statement(node)
def visit_AugAssign(self, node):
self._process_basic_statement(node)
def visit_Pass(self, node):
self._process_basic_statement(node)
def visit_Global(self, node):
self._process_basic_statement(node)
def visit_Nonlocal(self, node):
self._process_basic_statement(node)
def visit_Print(self, node):
self._process_basic_statement(node)
def visit_Raise(self, node):
self._process_exit_statement(
node, (gast.FunctionDef,), may_exit_via_except=True)
self.builder.errors.add(node)
def visit_Assert(self, node):
# Ignoring the effect of exceptions.
self._process_basic_statement(node)
def visit_Delete(self, node):
self._process_basic_statement(node)
def visit_If(self, node):
# No need to track ifs as lexical scopes, for now.
# Lexical scopes are generally tracked in order to be able to resolve the
# targets of jump statements like break/continue/etc. Since there is no
# statement that can interrupt a conditional, we don't need to track their
# lexical scope. That may change in the future.
self.builder.begin_statement(node)
self.builder.enter_cond_section(node)
self._process_basic_statement(node.test)
self.builder.new_cond_branch(node)
for stmt in node.body:
self.visit(stmt)
self.builder.new_cond_branch(node)
for stmt in node.orelse:
self.visit(stmt)
self.builder.exit_cond_section(node)
self.builder.end_statement(node)
def visit_While(self, node):
self.builder.begin_statement(node)
self._enter_lexical_scope(node)
self.builder.enter_section(node)
self.generic_visit(node.test)
self.builder.enter_loop_section(node, node.test)
for stmt in node.body:
self.visit(stmt)
self.builder.exit_loop_section(node)
# Note: although the orelse is technically part of the loop node,
# the statements inside it don't affect the loop itself. For example, a
# break in the loop's orelse will not affect the loop itself.
self._exit_lexical_scope(node)
for stmt in node.orelse:
self.visit(stmt)
self.builder.exit_section(node)
self.builder.end_statement(node)
def visit_For(self, node):
self.builder.begin_statement(node)
self._enter_lexical_scope(node)
self.builder.enter_section(node)
# Note: Strictly speaking, this should be node.target + node.iter.
# However, the activity analysis accounts for this inconsistency,
# so dataflow analysis produces the correct values.
self.generic_visit(node.iter)
self.builder.enter_loop_section(node, node.iter)
# Also include the "extra loop test" annotation, to capture things like the
# control variable for return and break in for loops.
if anno.hasanno(node, anno.Basic.EXTRA_LOOP_TEST):
self._process_basic_statement(
anno.getanno(node, anno.Basic.EXTRA_LOOP_TEST))
for stmt in node.body:
self.visit(stmt)
self.builder.exit_loop_section(node)
# Note: although the orelse is technically part of the loop node,
# they don't count as loop bodies. For example, a break in the loop's
# orelse will affect the parent loop, not the current one.
self._exit_lexical_scope(node)
for stmt in node.orelse:
self.visit(stmt)
self.builder.exit_section(node)
self.builder.end_statement(node)
def visit_Break(self, node):
self._process_exit_statement(node, (gast.While, gast.For,))
def visit_Continue(self, node):
self._process_continue_statement(node, (gast.While, gast.For,))
def visit_ExceptHandler(self, node):
self.builder.begin_statement(node)
self.builder.enter_except_section(node)
if node.type is not None:
self.visit(node.type)
if node.name is not None:
self.visit(node.name)
for stmt in node.body:
self.visit(stmt)
self.builder.end_statement(node)
def visit_Try(self, node):
self.builder.begin_statement(node)
self._enter_lexical_scope(node)
# Note: the current simplification is that the try block fully executes
# regardless of whether an exception triggers or not. This is consistent
# with blocks free of try/except, which also don't account for the
# possibility of an exception being raised mid-block.
for stmt in node.body:
self.visit(stmt)
# The orelse is an optional continuation of the body.
if node.orelse:
block_representative = node.orelse[0]
self.builder.enter_cond_section(block_representative)
self.builder.new_cond_branch(block_representative)
for stmt in node.orelse:
self.visit(stmt)
self.builder.new_cond_branch(block_representative)
self.builder.exit_cond_section(block_representative)
self._exit_lexical_scope(node)
if node.handlers:
# Using node would be inconsistent. Using the first handler node is also
# inconsistent, but less so.
block_representative = node.handlers[0]
self.builder.enter_cond_section(block_representative)
for block in node.handlers:
self.builder.new_cond_branch(block_representative)
self.visit(block)
self.builder.new_cond_branch(block_representative)
self.builder.exit_cond_section(block_representative)
if node.finalbody:
self.builder.enter_finally_section(node)
for stmt in node.finalbody:
self.visit(stmt)
self.builder.exit_finally_section(node)
self.builder.end_statement(node)
def visit_With(self, node):
# TODO(mdan): Mark the context manager's exit call as exit guard.
for item in node.items:
self._process_basic_statement(item)
for stmt in node.body:
self.visit(stmt)
def build(node):
visitor = AstToCfg()
visitor.visit(node)
return visitor.cfgs
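# Hypothetical usage sketch (not part of the original module): build the CFGs
# for a small function and dump them in DOT format. The source snippet is
# made up for illustration.
#
#   import textwrap
#   node = gast.parse(textwrap.dedent("""
#       def f(x):
#         if x > 0:
#           return x
#         return -x
#   """))
#   for fn_node, graph in build(node).items():
#     print(graph.as_dot())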
|
annarev/tensorflow
|
tensorflow/python/autograph/pyct/cfg.py
|
Python
|
apache-2.0
| 32,331
|
[
"VisIt"
] |
3fc8ad7f3e26b67119487a9cc12414d153131e0bb137ae92ad8520750182dbb0
|
"""
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: Utilities.py 1298 2006-11-07 00:54:15Z sanxiyn $'
from version import __version__
import re
import string
import sys
from types import *
# SOAPpy modules
from Errors import *
################################################################################
# Utility infielders
################################################################################
def collapseWhiteSpace(s):
return re.sub('\s+', ' ', s).strip()
def decodeHexString(data):
conv = {
'0': 0x0, '1': 0x1, '2': 0x2, '3': 0x3, '4': 0x4,
'5': 0x5, '6': 0x6, '7': 0x7, '8': 0x8, '9': 0x9,
'a': 0xa, 'b': 0xb, 'c': 0xc, 'd': 0xd, 'e': 0xe,
'f': 0xf,
'A': 0xa, 'B': 0xb, 'C': 0xc, 'D': 0xd, 'E': 0xe,
'F': 0xf,
}
ws = string.whitespace
bin = ''
i = 0
while i < len(data):
if data[i] not in ws:
break
i += 1
low = 0
while i < len(data):
c = data[i]
if c in string.whitespace:
break
try:
c = conv[c]
except KeyError:
raise ValueError, \
"invalid hex string character `%s'" % c
if low:
bin += chr(high * 16 + c)
low = 0
else:
high = c
low = 1
i += 1
if low:
raise ValueError, "invalid hex string length"
while i < len(data):
if data[i] not in string.whitespace:
raise ValueError, \
"invalid hex string character `%s'" % c
i += 1
return bin
def encodeHexString(data):
h = ''
for i in data:
h += "%02X" % ord(i)
return h
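# Round-trip sketch (illustrative, not part of the original module; Python 2
# string semantics assumed, matching the rest of this file). Note that
# decodeHexString tolerates leading/trailing whitespace but not whitespace
# between digits:
#
#     >>> encodeHexString('Foo')
#     '466F6F'
#     >>> decodeHexString('  466f6f  ')
#     'Foo'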
def leapMonth(year, month):
return month == 2 and \
year % 4 == 0 and \
(year % 100 != 0 or year % 400 == 0)
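# Quick sanity checks (illustrative only):
#
#     leapMonth(2000, 2)  ->  True   # divisible by 400
#     leapMonth(1900, 2)  ->  False  # divisible by 100 but not by 400
#     leapMonth(2004, 3)  ->  False  # only February can be a leap month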
def cleanDate(d, first = 0):
ranges = (None, (1, 12), (1, 31), (0, 23), (0, 59), (0, 61))
months = (0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
names = ('year', 'month', 'day', 'hours', 'minutes', 'seconds')
if len(d) != 6:
raise ValueError, "date must have 6 elements"
for i in range(first, 6):
s = d[i]
if type(s) == FloatType:
if i < 5:
try:
s = int(s)
except OverflowError:
if i > 0:
raise
s = long(s)
if s != d[i]:
raise ValueError, "%s must be integral" % names[i]
d[i] = s
elif type(s) == LongType:
try: s = int(s)
except: pass
elif type(s) != IntType:
raise TypeError, "%s isn't a valid type" % names[i]
if i == first and s < 0:
continue
if ranges[i] != None and \
(s < ranges[i][0] or ranges[i][1] < s):
raise ValueError, "%s out of range" % names[i]
if first < 6 and d[5] >= 61:
raise ValueError, "seconds out of range"
if first < 2:
leap = first < 1 and leapMonth(d[0], d[1])
if d[2] > months[d[1]] + leap:
raise ValueError, "day out of range"
def debugHeader(title):
s = '*** ' + title + ' '
print s + ('*' * (72 - len(s)))
def debugFooter(title):
print '*' * 72
sys.stdout.flush()
|
burzillibus/RobHome
|
venv/lib/python2.7/site-packages/SOAPpy/Utilities.py
|
Python
|
mit
| 5,112
|
[
"Brian"
] |
5d454b3f53e6b6f6c16a84493ad7b5361dbad0de4b9e520b9750412dea0d47e1
|
"""
Copyright (c) 2014 Brian Muller
Copyright (c) 2015 OpenBazaar
"""
import abc
import random
from base64 import b64encode
from config import PROTOCOL_VERSION
from dht.node import Node
from dht.utils import digest
from hashlib import sha1
from log import Logger
from protos.message import Message, Command, NOT_FOUND, HOLE_PUNCH, ORDER
from protos.objects import FULL_CONE, RESTRICTED, SYMMETRIC
from twisted.internet import defer, reactor
from txrudp.connection import State
class RPCProtocol:
"""
This is an abstract class for processing and sending rpc messages.
A class that implements the `MessageProcessor` interface probably should
extend this as it does most of the work of keeping track of messages.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, sourceNode, router, waitTimeout=60):
"""
Args:
sourceNode: A protobuf `Node` object containing info about this node.
router: A `RoutingTable` object from dht.routing. Implies a `network.Server` object
must be started first.
waitTimeout: Timeout for whole messages. Note the txrudp layer has a per-packet
                timeout but invalid responses won't trigger it. The waitTimeout on this
layer needs to be long enough to allow whole messages (ex. images) to
transmit.
"""
self.sourceNode = sourceNode
self.router = router
self._waitTimeout = waitTimeout
self._outstanding = {}
self.log = Logger(system=self)
def receive_message(self, message, sender, connection, ban_score):
if message.testnet != self.multiplexer.testnet:
self.log.warning("received message from %s with incorrect network parameters." %
str(connection.dest_addr))
connection.shutdown()
return False
if sender.vendor:
self.multiplexer.vendors[sender.id] = sender
msgID = message.messageID
if message.command == NOT_FOUND:
data = None
else:
data = tuple(message.arguments)
if msgID in self._outstanding:
self._acceptResponse(msgID, data, sender)
elif message.command != NOT_FOUND:
ban_score.process_message(connection.dest_addr, message)
self._acceptRequest(msgID, str(Command.Name(message.command)).lower(), data, sender, connection)
def _acceptResponse(self, msgID, data, sender):
if data is not None:
msgargs = (b64encode(msgID), sender)
self.log.debug("received response for message id %s from %s" % msgargs)
else:
self.log.warning("received 404 error response from %s" % sender)
d = self._outstanding[msgID][0]
if self._outstanding[msgID][2].active():
self._outstanding[msgID][2].cancel()
d.callback((True, data))
del self._outstanding[msgID]
def _acceptRequest(self, msgID, funcname, args, sender, connection):
self.log.debug("received request from %s, command %s" % (sender, funcname.upper()))
f = getattr(self, "rpc_%s" % funcname, None)
if f is None or not callable(f):
msgargs = (self.__class__.__name__, funcname)
self.log.error("%s has no callable method rpc_%s; ignoring request" % msgargs)
return False
if funcname == "hole_punch":
f(sender, *args)
else:
d = defer.maybeDeferred(f, sender, *args)
d.addCallback(self._sendResponse, funcname, msgID, sender, connection)
d.addErrback(self._sendResponse, "bad_request", msgID, sender, connection)
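    # Handler sketch (an assumption for illustration, not part of this class):
    # a request whose command is PING would be dispatched by _acceptRequest to
    # a method named rpc_ping; whatever it returns is shipped back through
    # _sendResponse below.
    #
    #     def rpc_ping(self, sender):
    #         return [self.sourceNode.getProto().SerializeToString()]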
def _sendResponse(self, response, funcname, msgID, sender, connection):
self.log.debug("sending response for msg id %s to %s" % (b64encode(msgID), sender))
m = Message()
m.messageID = msgID
m.sender.MergeFrom(self.sourceNode.getProto())
m.protoVer = PROTOCOL_VERSION
m.testnet = self.multiplexer.testnet
if response is None:
m.command = NOT_FOUND
else:
m.command = Command.Value(funcname.upper())
if not isinstance(response, list):
response = [response]
for arg in response:
m.arguments.append(str(arg))
m.signature = self.signing_key.sign(m.SerializeToString())[:64]
connection.send_message(m.SerializeToString())
def timeout(self, node):
"""
This timeout is called by the txrudp connection handler. We will run through the
outstanding messages and callback false on any waiting on this IP address.
"""
address = (node.ip, node.port)
for msgID, val in self._outstanding.items():
if address == val[1]:
val[0].callback((False, None))
if self._outstanding[msgID][2].active():
self._outstanding[msgID][2].cancel()
del self._outstanding[msgID]
self.router.removeContact(node)
try:
self.multiplexer[address].shutdown()
except Exception:
pass
def rpc_hole_punch(self, sender, ip, port, relay="False"):
"""
A method for handling an incoming HOLE_PUNCH message. Relay the message
to the correct node if it's not for us. Otherwise send a datagram to allow
the other node to punch through our NAT.
"""
if relay == "True":
self.log.debug("relaying hole punch packet to %s:%s for %s:%s" %
(ip, port, sender.ip, str(sender.port)))
self.hole_punch(Node(digest("null"), ip, int(port), nat_type=FULL_CONE), sender.ip, sender.port)
else:
self.log.debug("punching through NAT for %s:%s" % (ip, port))
# pylint: disable=W0612
for i in range(20):
self.multiplexer.send_datagram("", (ip, int(port)))
def __getattr__(self, name):
if name.startswith("_") or name.startswith("rpc_"):
return object.__getattr__(self, name)
try:
return object.__getattr__(self, name)
except AttributeError:
pass
def func(node, *args):
address = (node.ip, node.port)
msgID = sha1(str(random.getrandbits(255))).digest()
m = Message()
m.messageID = msgID
m.sender.MergeFrom(self.sourceNode.getProto())
m.command = Command.Value(name.upper())
m.protoVer = PROTOCOL_VERSION
for arg in args:
m.arguments.append(str(arg))
m.testnet = self.multiplexer.testnet
m.signature = self.signing_key.sign(m.SerializeToString())[:64]
data = m.SerializeToString()
relay_addr = None
if node.nat_type == SYMMETRIC or \
(node.nat_type == RESTRICTED and self.sourceNode.nat_type == SYMMETRIC):
relay_addr = node.relay_node
d = defer.Deferred()
if m.command != HOLE_PUNCH:
timeout = reactor.callLater(self._waitTimeout, self.timeout, node)
self._outstanding[msgID] = [d, address, timeout]
self.log.debug("calling remote function %s on %s (msgid %s)" % (name, address, b64encode(msgID)))
self.multiplexer.send_message(data, address, relay_addr)
if self.multiplexer[address].state != State.CONNECTED and \
node.nat_type == RESTRICTED and \
self.sourceNode.nat_type != SYMMETRIC and \
node.relay_node is not None:
self.hole_punch(Node(digest("null"), node.relay_node[0], node.relay_node[1], nat_type=FULL_CONE),
address[0], address[1], "True")
self.log.debug("sending hole punch message to %s" % address[0] + ":" + str(address[1]))
return d
return func
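# Hypothetical usage sketch (not part of the original module): thanks to
# __getattr__ above, any attribute that is neither private nor an rpc_*
# handler becomes an RPC stub. The command name below is an assumption for
# illustration; the stub returns a twisted Deferred firing with (True, args)
# on success or (False, None) on timeout.
#
#     d = protocol.ping(some_node)
#     d.addCallback(lambda (ok, args): handle(ok, args))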
|
OpenBazaar/OpenBazaar-Server
|
net/rpcudp.py
|
Python
|
mit
| 8,063
|
[
"Brian"
] |
52a9f775e4ad8310751200bb657ec9bbf20f3183f640c39b176de9d8f8ae1a77
|
from __future__ import division, print_function
import numpy as np
from dipy.denoise.denspeed import nlmeans_3d
# from warnings import warn
# import warnings
# warnings.simplefilter('always', DeprecationWarning)
# warn(DeprecationWarning("Module 'dipy.denoise.nlmeans' is deprecated,"
# " use module 'dipy.denoise.non_local_means' instead"))
def nlmeans(arr, sigma, mask=None, patch_radius=1, block_radius=5,
rician=True, num_threads=None):
r""" Non-local means for denoising 3D and 4D images
Parameters
----------
    arr : 3D or 4D ndarray
        The array to be denoised
    sigma : float or 3D array
        standard deviation of the noise estimated from the data
    mask : 3D ndarray
        Mask over the data to which the filter is applied (optional).
patch_radius : int
patch size is ``2 x patch_radius + 1``. Default is 1.
block_radius : int
block size is ``2 x block_radius + 1``. Default is 5.
rician : boolean
If True the noise is estimated as Rician, otherwise Gaussian noise
is assumed.
num_threads : int
Number of threads. If None (default) then all available threads
will be used (all CPU cores).
Returns
-------
denoised_arr : ndarray
the denoised ``arr`` which has the same shape as ``arr``.
References
----------
.. [Descoteaux08] Descoteaux, Maxim and Wiest-Daessle`, Nicolas and Prima,
Sylvain and Barillot, Christian and Deriche, Rachid
Impact of Rician Adapted Non-Local Means Filtering on
HARDI, MICCAI 2008
"""
# warn(DeprecationWarning("function 'dipy.denoise.nlmeans'"
# " is deprecated, use module "
# "'dipy.denoise.non_local_means'"
# " instead"))
if arr.ndim == 3:
sigma = np.ones(arr.shape, dtype=np.float64) * sigma
return nlmeans_3d(arr, mask, sigma,
patch_radius, block_radius,
rician, num_threads).astype(arr.dtype)
elif arr.ndim == 4:
denoised_arr = np.zeros_like(arr)
if isinstance(sigma, np.ndarray) and sigma.ndim == 3:
sigma = (np.ones(arr.shape, dtype=np.float64) *
sigma[..., np.newaxis])
else:
sigma = np.ones(arr.shape, dtype=np.float64) * sigma
for i in range(arr.shape[-1]):
denoised_arr[..., i] = nlmeans_3d(arr[..., i],
mask,
sigma[..., i],
patch_radius,
block_radius,
rician,
num_threads).astype(arr.dtype)
return denoised_arr
else:
raise ValueError("Only 3D or 4D array are supported!", arr.shape)
|
nilgoyyou/dipy
|
dipy/denoise/nlmeans.py
|
Python
|
bsd-3-clause
| 2,956
|
[
"Gaussian"
] |
ea6f596759223b66f16be6fc3caa4c7d377e95edc91e3cefc0a184281056712c
|
import numpy as np
from collections import Iterable
from menpo.image import Image
from menpofit.visualize import view_image_multiple_landmarks
from menpofit.error import euclidean_bb_normalised_error
def _rescale_shapes_to_reference(shapes, affine_transform, scale_transform):
rescaled_shapes = []
for shape in shapes:
shape = scale_transform.apply(shape)
rescaled_shapes.append(affine_transform.apply(shape))
return rescaled_shapes
def _parse_iters(iters, n_shapes):
if not (iters is None or isinstance(iters, int) or
isinstance(iters, list)):
raise ValueError('iters must be either int or list or None')
if iters is None:
iters = list(range(n_shapes))
if isinstance(iters, int):
iters = [iters]
return iters
def _get_scale_of_iter(iter_i, reconstruction_indices):
ids = np.array(reconstruction_indices)
return np.nonzero(iter_i >= ids)[0][-1]
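# Illustrative behaviour of the helpers above (values are made up):
#
#     _parse_iters(None, 3)             ->  [0, 1, 2]
#     _parse_iters(2, 3)                ->  [2]
#     _get_scale_of_iter(5, [0, 4, 9])  ->  1   # iteration 5 is in scale 1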
class Result(object):
r"""
Class for defining a basic fitting result. It holds the final shape of a
fitting process and, optionally, the initial shape, ground truth shape
and the image object.
Parameters
----------
final_shape : `menpo.shape.PointCloud`
The final shape of the fitting process.
image : `menpo.image.Image` or `subclass` or ``None``, optional
The image on which the fitting process was applied. Note that a copy
of the image will be assigned as an attribute. If ``None``, then no
image is assigned.
initial_shape : `menpo.shape.PointCloud` or ``None``, optional
The initial shape that was provided to the fitting method to
initialise the fitting process. If ``None``, then no initial shape is
assigned.
gt_shape : `menpo.shape.PointCloud` or ``None``, optional
The ground truth shape associated with the image. If ``None``, then no
ground truth shape is assigned.
"""
def __init__(self, final_shape, image=None, initial_shape=None,
gt_shape=None):
self._final_shape = final_shape
self._initial_shape = initial_shape
self._gt_shape = gt_shape
# If image is provided, create a copy
self._image = None
if image is not None:
self._image = Image(image.pixels)
@property
def is_iterative(self):
r"""
Flag whether the object is an iterative fitting result.
:type: `bool`
"""
return False
@property
def final_shape(self):
r"""
Returns the final shape of the fitting process.
:type: `menpo.shape.PointCloud`
"""
return self._final_shape
@property
def initial_shape(self):
r"""
Returns the initial shape that was provided to the fitting method to
initialise the fitting process. In case the initial shape does not
exist, then ``None`` is returned.
:type: `menpo.shape.PointCloud` or ``None``
"""
return self._initial_shape
@property
def gt_shape(self):
r"""
Returns the ground truth shape associated with the image. In case there
is not an attached ground truth shape, then ``None`` is returned.
:type: `menpo.shape.PointCloud` or ``None``
"""
return self._gt_shape
@property
def image(self):
r"""
Returns the image that the fitting was applied on, if it was provided.
Otherwise, it returns ``None``.
:type: `menpo.shape.Image` or `subclass` or ``None``
"""
return self._image
def final_error(self, compute_error=None):
r"""
Returns the final error of the fitting process, if the ground truth
shape exists. This is the error computed based on the `final_shape`.
Parameters
----------
compute_error: `callable`, optional
Callable that computes the error between the fitted and
ground truth shapes.
Returns
-------
final_error : `float`
The final error at the end of the fitting process.
Raises
------
ValueError
Ground truth shape has not been set, so the final error cannot be
computed
"""
if compute_error is None:
compute_error = euclidean_bb_normalised_error
if self.gt_shape is not None:
return compute_error(self.final_shape, self.gt_shape)
else:
raise ValueError('Ground truth shape has not been set, so the '
'final error cannot be computed')
def initial_error(self, compute_error=None):
r"""
Returns the initial error of the fitting process, if the ground truth
shape and initial shape exist. This is the error computed based on the
`initial_shape`.
Parameters
----------
compute_error: `callable`, optional
Callable that computes the error between the initial and
ground truth shapes.
Returns
-------
initial_error : `float`
The initial error at the beginning of the fitting process.
Raises
------
ValueError
Initial shape has not been set, so the initial error cannot be
computed
ValueError
Ground truth shape has not been set, so the initial error cannot be
computed
"""
if compute_error is None:
compute_error = euclidean_bb_normalised_error
if self.initial_shape is None:
raise ValueError('Initial shape has not been set, so the initial '
'error cannot be computed')
elif self.gt_shape is None:
raise ValueError('Ground truth shape has not been set, so the '
'initial error cannot be computed')
else:
return compute_error(self.initial_shape, self.gt_shape)
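    # Illustrative usage sketch (names such as `fitter`, `image` and
    # `bounding_box` are assumptions from a typical menpofit workflow, not
    # part of this module):
    #
    #     result = fitter.fit_from_bb(image, bounding_box, gt_shape=gt_shape)
    #     print(result.final_error())
    #     result.view(render_initial_shape=True)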
def view(self, figure_id=None, new_figure=False, render_image=True,
render_final_shape=True, render_initial_shape=False,
render_gt_shape=False, subplots_enabled=True, channels=None,
interpolation='bilinear', cmap_name=None, alpha=1., masked=True,
final_marker_face_colour='r', final_marker_edge_colour='k',
final_line_colour='r', initial_marker_face_colour='b',
initial_marker_edge_colour='k', initial_line_colour='b',
gt_marker_face_colour='y', gt_marker_edge_colour='k',
gt_line_colour='y', render_lines=True, line_style='-',
line_width=2, render_markers=True, marker_style='o', marker_size=4,
marker_edge_width=1., render_numbering=False,
numbers_horizontal_align='center',
numbers_vertical_align='bottom', numbers_font_name='sans-serif',
numbers_font_size=10, numbers_font_style='normal',
numbers_font_weight='normal', numbers_font_colour='k',
render_legend=True, legend_title='', legend_font_name='sans-serif',
legend_font_style='normal', legend_font_size=10,
legend_font_weight='normal', legend_marker_scale=None,
legend_location=2, legend_bbox_to_anchor=(1.05, 1.),
legend_border_axes_pad=None, legend_n_columns=1,
legend_horizontal_spacing=None, legend_vertical_spacing=None,
legend_border=True, legend_border_padding=None,
legend_shadow=False, legend_rounded_corners=False,
render_axes=False, axes_font_name='sans-serif', axes_font_size=10,
axes_font_style='normal', axes_font_weight='normal',
axes_x_limits=None, axes_y_limits=None, axes_x_ticks=None,
axes_y_ticks=None, figure_size=(10, 8)):
"""
        Visualize the fitting result. The method renders the final fitted
        shape and, optionally, the initial shape, ground truth shape and the
        image, if they were provided.
Parameters
----------
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
render_image : `bool`, optional
If ``True`` and the image exists, then it gets rendered.
render_final_shape : `bool`, optional
If ``True``, then the final fitting shape gets rendered.
render_initial_shape : `bool`, optional
If ``True`` and the initial fitting shape exists, then it gets
rendered.
render_gt_shape : `bool`, optional
If ``True`` and the ground truth shape exists, then it gets
rendered.
subplots_enabled : `bool`, optional
If ``True``, then the requested final, initial and ground truth
shapes get rendered on separate subplots.
channels : `int` or `list` of `int` or ``all`` or ``None``
If `int` or `list` of `int`, the specified channel(s) will be
rendered. If ``all``, all the channels will be rendered in subplots.
If ``None`` and the image is RGB, it will be rendered in RGB mode.
If ``None`` and the image is not RGB, it is equivalent to ``all``.
interpolation : See Below, optional
The interpolation used to render the image. For example, if
``bilinear``, the image will be smooth and if ``nearest``, the
image will be pixelated. Example options ::
{none, nearest, bilinear, bicubic, spline16, spline36, hanning,
hamming, hermite, kaiser, quadric, catrom, gaussian, bessel,
mitchell, sinc, lanczos}
cmap_name: `str`, optional,
If ``None``, single channel and three channel images default
to greyscale and rgb colormaps respectively.
alpha : `float`, optional
The alpha blending value, between 0 (transparent) and 1 (opaque).
masked : `bool`, optional
If ``True``, then the image is rendered as masked.
final_marker_face_colour : See Below, optional
The face (filling) colour of the markers of the final fitting shape.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
final_marker_edge_colour : See Below, optional
The edge colour of the markers of the final fitting shape.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
final_line_colour : See Below, optional
The line colour of the final fitting shape.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
initial_marker_face_colour : See Below, optional
The face (filling) colour of the markers of the initial shape.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
initial_marker_edge_colour : See Below, optional
The edge colour of the markers of the initial shape.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
initial_line_colour : See Below, optional
The line colour of the initial shape.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
gt_marker_face_colour : See Below, optional
The face (filling) colour of the markers of the ground truth shape.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
gt_marker_edge_colour : See Below, optional
The edge colour of the markers of the ground truth shape.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
gt_line_colour : See Below, optional
The line colour of the ground truth shape.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
render_lines : `bool` or `list` of `bool`, optional
If ``True``, the lines will be rendered. You can either provide a
single value that will be used for all shapes or a list with a
different value per shape in (`final`, `initial`, `groundtruth`)
order.
line_style : `str` or `list` of `str`, optional
The style of the lines. You can either provide a single value that
will be used for all shapes or a list with a different value per
shape in (`final`, `initial`, `groundtruth`) order.
Example options::
{'-', '--', '-.', ':'}
line_width : `float` or `list` of `float`, optional
The width of the lines. You can either provide a single value that
will be used for all shapes or a list with a different value per
shape in (`final`, `initial`, `groundtruth`) order.
render_markers : `bool` or `list` of `bool`, optional
If ``True``, the markers will be rendered. You can either provide a
single value that will be used for all shapes or a list with a
different value per shape in (`final`, `initial`, `groundtruth`)
order.
marker_style : `str` or `list` of `str`, optional
The style of the markers. You can either provide a single value that
will be used for all shapes or a list with a different value per
shape in (`final`, `initial`, `groundtruth`) order.
Example options::
{., ,, o, v, ^, <, >, +, x, D, d, s, p, *, h, H, 1, 2, 3, 4, 8}
marker_size : `int` or `list` of `int`, optional
The size of the markers in points. You can either provide a single
value that will be used for all shapes or a list with a different
value per shape in (`final`, `initial`, `groundtruth`) order.
marker_edge_width : `float` or `list` of `float`, optional
The width of the markers' edge. You can either provide a single
value that will be used for all shapes or a list with a different
value per shape in (`final`, `initial`, `groundtruth`) order.
render_numbering : `bool`, optional
If ``True``, the landmarks will be numbered.
numbers_horizontal_align : ``{center, right, left}``, optional
The horizontal alignment of the numbers' texts.
numbers_vertical_align : ``{center, top, bottom, baseline}``, optional
The vertical alignment of the numbers' texts.
numbers_font_name : See Below, optional
The font of the numbers. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
numbers_font_size : `int`, optional
The font size of the numbers.
numbers_font_style : ``{normal, italic, oblique}``, optional
The font style of the numbers.
numbers_font_weight : See Below, optional
The font weight of the numbers.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
numbers_font_colour : See Below, optional
The font colour of the numbers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
render_legend : `bool`, optional
If ``True``, the legend will be rendered.
legend_title : `str`, optional
The title of the legend.
legend_font_name : See below, optional
The font of the legend. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
legend_font_style : ``{normal, italic, oblique}``, optional
The font style of the legend.
legend_font_size : `int`, optional
The font size of the legend.
legend_font_weight : See Below, optional
The font weight of the legend.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
legend_marker_scale : `float`, optional
            The relative size of the legend markers with respect to the
            original markers.
legend_location : `int`, optional
The location of the legend. The predefined values are:
=============== ==
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== ==
legend_bbox_to_anchor : (`float`, `float`) `tuple`, optional
The bbox that the legend will be anchored.
legend_border_axes_pad : `float`, optional
The pad between the axes and legend border.
legend_n_columns : `int`, optional
The number of the legend's columns.
legend_horizontal_spacing : `float`, optional
The spacing between the columns.
legend_vertical_spacing : `float`, optional
The vertical space between the legend entries.
legend_border : `bool`, optional
If ``True``, a frame will be drawn around the legend.
legend_border_padding : `float`, optional
The fractional whitespace inside the legend border.
legend_shadow : `bool`, optional
If ``True``, a shadow will be drawn behind legend.
legend_rounded_corners : `bool`, optional
If ``True``, the frame's corners will be rounded (fancybox).
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : See Below, optional
The font of the axes. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : ``{normal, italic, oblique}``, optional
The font style of the axes.
axes_font_weight : See Below, optional
The font weight of the axes.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
axes_x_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the x axis. If `float`, then it sets padding on the
right and left of the Image as a percentage of the Image's width. If
`tuple` or `list`, then it defines the axis limits. If ``None``, then
the limits are set automatically.
axes_y_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the y axis. If `float`, then it sets padding on the
top and bottom of the Image as a percentage of the Image's height. If
`tuple` or `list`, then it defines the axis limits. If ``None``, then
the limits are set automatically.
axes_x_ticks : `list` or `tuple` or ``None``, optional
The ticks of the x axis.
axes_y_ticks : `list` or `tuple` or ``None``, optional
The ticks of the y axis.
figure_size : (`float`, `float`) `tuple` or ``None``, optional
The size of the figure in inches.
Returns
-------
renderer : `class`
The renderer object.
"""
# Create image instance
if self.image is None:
image = Image(np.zeros((10, 10)))
render_image = False
else:
image = Image(self.image.pixels)
# Assign pointclouds to image
groups = []
face_colours = []
edge_colours = []
line_colours = []
subplots_titles = {}
if render_final_shape:
image.landmarks['final'] = self.final_shape
groups.append('final')
face_colours.append(final_marker_face_colour)
edge_colours.append(final_marker_edge_colour)
line_colours.append(final_line_colour)
subplots_titles['final'] = 'Final'
if self.initial_shape is not None and render_initial_shape:
image.landmarks['initial'] = self.initial_shape
groups.append('initial')
face_colours.append(initial_marker_face_colour)
edge_colours.append(initial_marker_edge_colour)
line_colours.append(initial_line_colour)
subplots_titles['initial'] = 'Initial'
if self.gt_shape is not None and render_gt_shape:
image.landmarks['groundtruth'] = self.gt_shape
groups.append('groundtruth')
face_colours.append(gt_marker_face_colour)
edge_colours.append(gt_marker_edge_colour)
line_colours.append(gt_line_colour)
subplots_titles['groundtruth'] = 'Groundtruth'
# Render
return view_image_multiple_landmarks(
image, groups, with_labels=None, figure_id=figure_id,
new_figure=new_figure, subplots_enabled=subplots_enabled,
subplots_titles=subplots_titles, render_image=render_image,
render_landmarks=True, masked=masked,
channels=channels, interpolation=interpolation,
cmap_name=cmap_name, alpha=alpha, image_view=True,
render_lines=render_lines, line_style=line_style,
line_width=line_width, line_colour=line_colours,
render_markers=render_markers, marker_style=marker_style,
marker_size=marker_size, marker_edge_width=marker_edge_width,
marker_edge_colour=edge_colours,
marker_face_colour=face_colours,
render_numbering=render_numbering,
numbers_horizontal_align=numbers_horizontal_align,
numbers_vertical_align=numbers_vertical_align,
numbers_font_name=numbers_font_name,
numbers_font_size=numbers_font_size,
numbers_font_style=numbers_font_style,
numbers_font_weight=numbers_font_weight,
numbers_font_colour=numbers_font_colour,
render_legend=render_legend, legend_title=legend_title,
legend_font_name=legend_font_name,
legend_font_style=legend_font_style,
legend_font_size=legend_font_size,
legend_font_weight=legend_font_weight,
legend_marker_scale=legend_marker_scale,
legend_location=legend_location,
legend_bbox_to_anchor=legend_bbox_to_anchor,
legend_border_axes_pad=legend_border_axes_pad,
legend_n_columns=legend_n_columns,
legend_horizontal_spacing=legend_horizontal_spacing,
legend_vertical_spacing=legend_vertical_spacing,
legend_border=legend_border,
legend_border_padding=legend_border_padding,
legend_shadow=legend_shadow,
legend_rounded_corners=legend_rounded_corners,
render_axes=render_axes, axes_font_name=axes_font_name,
axes_font_size=axes_font_size, axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight, axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits, axes_x_ticks=axes_x_ticks,
axes_y_ticks=axes_y_ticks, figure_size=figure_size)
def view_widget(self, browser_style='buttons', figure_size=(10, 8),
style='coloured'):
r"""
Visualizes the result object using an interactive widget.
Parameters
----------
browser_style : {``'buttons'``, ``'slider'``}, optional
It defines whether the selector of the images will have the form of
plus/minus buttons or a slider.
figure_size : (`int`, `int`), optional
The initial size of the rendered figure.
style : {``'coloured'``, ``'minimal'``}, optional
If ``'coloured'``, then the style of the widget will be coloured. If
``'minimal'``, then the style is simple, using black and white colours.
"""
try:
from menpowidgets import visualize_fitting_result
visualize_fitting_result(self, figure_size=figure_size, style=style,
browser_style=browser_style)
except ImportError:
from menpo.visualize.base import MenpowidgetsMissingError
raise MenpowidgetsMissingError()
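# Usage sketch (not part of the API): assuming the optional menpowidgets
# package is installed and `result` is any fitting result instance, an
# interactive browse could look like:
#
#     result.view_widget(browser_style='slider', style='minimal')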
def __str__(self):
out = "Fitting result of {} landmark points.".format(
self.final_shape.n_points)
if self.gt_shape is not None:
if self.initial_shape is not None:
out += "\nInitial error: {:.4f}".format(self.initial_error())
out += "\nFinal error: {:.4f}".format(self.final_error())
return out
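# Printing sketch: str(result) produces a short summary; with a ground
# truth and an initial shape set, the output looks like (values
# illustrative):
#
#     Fitting result of 68 landmark points.
#     Initial error: 0.0892
#     Final error: 0.0311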
class NonParametricIterativeResult(Result):
r"""
Class for defining a non-parametric iterative fitting result, i.e. the
result of a method that does not optimize over a parametric shape model. It
holds the shapes of all the iterations of the fitting procedure. It can
optionally store the image on which the fitting was applied, as well as its
ground truth shape.
Parameters
----------
shapes : `list` of `menpo.shape.PointCloud`
The `list` of shapes per iteration. Note that the list does not
include the initial shape. The last member of the list is the final
shape.
initial_shape : `menpo.shape.PointCloud` or ``None``, optional
The initial shape from which the fitting process started. If ``None``,
then no initial shape is assigned.
image : `menpo.image.Image` or `subclass` or ``None``, optional
The image on which the fitting process was applied. Note that a copy
of the image will be assigned as an attribute. If ``None``, then no
image is assigned.
gt_shape : `menpo.shape.PointCloud` or ``None``, optional
The ground truth shape associated with the image. If ``None``, then no
ground truth shape is assigned.
costs : `list` of `float` or ``None``, optional
The `list` of cost per iteration. If ``None``, then it is assumed that
the cost function cannot be computed for the specific algorithm. It must
have the same length as `shapes`.
"""
def __init__(self, shapes, initial_shape=None, image=None, gt_shape=None,
costs=None):
super(NonParametricIterativeResult, self).__init__(
final_shape=shapes[-1], image=image, initial_shape=initial_shape,
gt_shape=gt_shape)
self._n_iters = len(shapes)
# If an initial shape is provided, then add it at the beginning of shapes
self._shapes = shapes
if self.initial_shape is not None:
self._shapes = [self.initial_shape] + self._shapes
# Add costs as property
self._costs = costs
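# Layout sketch of self._shapes after construction, assuming an initial
# shape was given:
#
#     self._shapes[0]   -> initial shape
#     self._shapes[1:]  -> one shape per iteration
#     self._shapes[-1]  -> final shape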
@property
def is_iterative(self):
r"""
Flag whether the object is an iterative fitting result.
:type: `bool`
"""
return True
@property
def shapes(self):
r"""
Returns the `list` of shapes obtained at each iteration of the fitting
process. The `list` includes the `initial_shape` (if it exists) and
`final_shape`.
:type: `list` of `menpo.shape.PointCloud`
"""
return self._shapes
@property
def n_iters(self):
r"""
Returns the total number of iterations of the fitting process.
:type: `int`
"""
return self._n_iters
def to_result(self, pass_image=True, pass_initial_shape=True,
pass_gt_shape=True):
r"""
Returns a :map:`Result` instance of the object, i.e. a fitting result
object that does not store the iterations. This can be useful for
reducing the size of saved fitting results.
Parameters
----------
pass_image : `bool`, optional
If ``True``, then the image will get passed (if it exists).
pass_initial_shape : `bool`, optional
If ``True``, then the initial shape will get passed (if it exists).
pass_gt_shape : `bool`, optional
If ``True``, then the ground truth shape will get passed (if it
exists).
Returns
-------
result : :map:`Result`
The final "lightweight" fitting result.
"""
image = None
if pass_image:
image = self.image
initial_shape = None
if pass_initial_shape:
initial_shape = self.initial_shape
gt_shape = None
if pass_gt_shape:
gt_shape = self.gt_shape
return Result(self.final_shape, image=image,
initial_shape=initial_shape, gt_shape=gt_shape)
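# Usage sketch: a lightweight copy is handy before serialising many
# results (names illustrative):
#
#     import pickle
#
#     light = result.to_result(pass_image=False)
#     with open('result.pkl', 'wb') as f:
#         pickle.dump(light, f)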
def errors(self, compute_error=None):
r"""
Returns a list containing the error at each fitting iteration, if the
ground truth shape exists.
Parameters
----------
compute_error: `callable`, optional
Callable that computes the error between the shape at each
iteration and the ground truth shape.
Returns
-------
errors : `list` of `float`
The error at each iteration of the fitting process.
Raises
------
ValueError
Ground truth shape has not been set, so the errors per iteration
cannot be computed
"""
if compute_error is None:
compute_error = euclidean_bb_normalised_error
if self.gt_shape is not None:
return [compute_error(t, self.gt_shape)
for t in self.shapes]
else:
raise ValueError('Ground truth shape has not been set, so the '
'errors per iteration cannot be computed')
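# Usage sketch: any callable taking (shape, gt_shape) can be passed as
# compute_error, e.g. a plain RMSE over the point coordinates
# (illustrative, not the library default):
#
#     import numpy as np
#
#     def rmse(shape, gt_shape):
#         return np.sqrt(np.mean((shape.points - gt_shape.points) ** 2))
#
#     per_iteration = result.errors(compute_error=rmse)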
def plot_errors(self, compute_error=None, figure_id=None,
new_figure=False, render_lines=True, line_colour='b',
line_style='-', line_width=2, render_markers=True,
marker_style='o', marker_size=4, marker_face_colour='b',
marker_edge_colour='k', marker_edge_width=1.,
render_axes=True, axes_font_name='sans-serif',
axes_font_size=10, axes_font_style='normal',
axes_font_weight='normal', axes_x_limits=0.,
axes_y_limits=None, axes_x_ticks=None,
axes_y_ticks=None, figure_size=(10, 6),
render_grid=True, grid_line_style='--',
grid_line_width=0.5):
r"""
Plot of the error evolution at each fitting iteration.
Parameters
----------
compute_error: `callable`, optional
Callable that computes the error between the shape at each
iteration and the ground truth shape.
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
render_lines : `bool`, optional
If ``True``, the line will be rendered.
line_colour : `colour` or ``None`` (See below), optional
The colour of the line. If ``None``, the colour is sampled from
the jet colormap.
Example `colour` options are ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
line_style : `str` (See below), optional
The style of the lines. Example options::
{-, --, -., :}
line_width : `float`, optional
The width of the lines.
render_markers : `bool`, optional
If ``True``, the markers will be rendered.
marker_style : `str` (See below), optional
The style of the markers.
Example `marker` options ::
{., ,, o, v, ^, <, >, +, x, D, d, s, p, *, h, H, 1, 2, 3, 4, 8}
marker_size : `int`, optional
The size of the markers in points.
marker_face_colour : `colour` or ``None``, optional
The face (filling) colour of the markers. If ``None``, the colour
is sampled from the jet colormap.
Example `colour` options are ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_colour : `colour` or ``None``, optional
The edge colour of the markers. If ``None``, the colour
is sampled from the jet colormap.
Example `colour` options are ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_width : `float`, optional
The width of the markers' edge.
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : `str` (See below), optional
The font of the axes.
Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : `str` (See below), optional
The font style of the axes.
Example options ::
{normal, italic, oblique}
axes_font_weight : `str` (See below), optional
The font weight of the axes.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
axes_x_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the x axis. If `float`, then it sets padding on the
right and left of the graph as a percentage of the curves' width. If
`tuple` or `list`, then it defines the axis limits. If ``None``,
then the limits are set automatically.
axes_y_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the y axis. If `float`, then it sets padding on the
top and bottom of the graph as a percentage of the curves' height.
If `tuple` or `list`, then it defines the axis limits. If ``None``,
then the limits are set automatically.
axes_x_ticks : `list` or `tuple` or ``None``, optional
The ticks of the x axis.
axes_y_ticks : `list` or `tuple` or ``None``, optional
The ticks of the y axis.
figure_size : (`float`, `float`) or ``None``, optional
The size of the figure in inches.
render_grid : `bool`, optional
If ``True``, the grid will be rendered.
grid_line_style : ``{'-', '--', '-.', ':'}``, optional
The style of the grid lines.
grid_line_width : `float`, optional
The width of the grid lines.
Returns
-------
renderer : `menpo.visualize.GraphPlotter`
The renderer object.
"""
from menpo.visualize import plot_curve
errors = self.errors(compute_error=compute_error)
return plot_curve(
x_axis=list(range(len(errors))), y_axis=[errors], figure_id=figure_id,
new_figure=new_figure, title='Fitting Errors per Iteration',
x_label='Iteration', y_label='Fitting Error',
axes_x_limits=axes_x_limits, axes_y_limits=axes_y_limits,
axes_x_ticks=axes_x_ticks, axes_y_ticks=axes_y_ticks,
render_lines=render_lines, line_colour=line_colour,
line_style=line_style, line_width=line_width,
render_markers=render_markers, marker_style=marker_style,
marker_size=marker_size, marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width, render_legend=False,
render_axes=render_axes, axes_font_name=axes_font_name,
axes_font_size=axes_font_size, axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight, figure_size=figure_size,
render_grid=render_grid, grid_line_style=grid_line_style,
grid_line_width=grid_line_width)
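# Usage sketch (requires a ground truth shape on the result):
#
#     renderer = result.plot_errors(line_colour='r', render_grid=True)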
def displacements(self):
r"""
A list containing the displacement between the shape of each iteration
and the shape of the previous one.
:type: `list` of `ndarray`
"""
return [np.linalg.norm(s1.points - s2.points, axis=1)
for s1, s2 in zip(self.shapes, self.shapes[1:])]
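# Equivalence sketch: each entry is the per-point Euclidean norm of the
# coordinate differences between consecutive shapes:
#
#     d_i = np.linalg.norm(shapes[i + 1].points - shapes[i].points, axis=1)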
def displacements_stats(self, stat_type='mean'):
r"""
A list containing a statistical metric on the displacements between
the shape of each iteration and the shape of the previous one.
Parameters
----------
stat_type : ``{'mean', 'median', 'min', 'max'}``, optional
Specifies a statistic metric to be extracted from the displacements.
Returns
-------
displacements_stat : `list` of `float`
The statistical metric on the points displacements for each
iteration.
Raises
------
ValueError
type must be 'mean', 'median', 'min' or 'max'
"""
if stat_type == 'mean':
return [np.mean(d) for d in self.displacements()]
elif stat_type == 'median':
return [np.median(d) for d in self.displacements()]
elif stat_type == 'max':
return [np.max(d) for d in self.displacements()]
elif stat_type == 'min':
return [np.min(d) for d in self.displacements()]
else:
raise ValueError("type must be 'mean', 'median', 'min' or 'max'")
def plot_displacements(self, stat_type='mean', figure_id=None,
new_figure=False, render_lines=True, line_colour='b',
line_style='-', line_width=2, render_markers=True,
marker_style='o', marker_size=4,
marker_face_colour='b', marker_edge_colour='k',
marker_edge_width=1., render_axes=True,
axes_font_name='sans-serif', axes_font_size=10,
axes_font_style='normal', axes_font_weight='normal',
axes_x_limits=0., axes_y_limits=None,
axes_x_ticks=None, axes_y_ticks=None,
figure_size=(10, 6), render_grid=True,
grid_line_style='--', grid_line_width=0.5):
r"""
Plot of a statistical metric of the displacement between the shape of
each iteration and the shape of the previous one.
Parameters
----------
stat_type : ``{'mean', 'median', 'min', 'max'}``, optional
Specifies a statistic metric to be extracted from the displacements
(see also `displacements_stats()` method).
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
render_lines : `bool`, optional
If ``True``, the line will be rendered.
line_colour : `colour` or ``None`` (See below), optional
The colour of the line. If ``None``, the colour is sampled from
the jet colormap.
Example `colour` options are ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
line_style : `str` (See below), optional
The style of the lines. Example options::
{-, --, -., :}
line_width : `float`, optional
The width of the lines.
render_markers : `bool`, optional
If ``True``, the markers will be rendered.
marker_style : `str` (See below), optional
The style of the markers.
Example `marker` options ::
{., ,, o, v, ^, <, >, +, x, D, d, s, p, *, h, H, 1, 2, 3, 4, 8}
marker_size : `int`, optional
The size of the markers in points.
marker_face_colour : `colour` or ``None``, optional
The face (filling) colour of the markers. If ``None``, the colour
is sampled from the jet colormap.
Example `colour` options are ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_colour : `colour` or ``None``, optional
The edge colour of the markers. If ``None``, the colour
is sampled from the jet colormap.
Example `colour` options are ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_width : `float`, optional
The width of the markers' edge.
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : `str` (See below), optional
The font of the axes.
Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : `str` (See below), optional
The font style of the axes.
Example options ::
{normal, italic, oblique}
axes_font_weight : `str` (See below), optional
The font weight of the axes.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
axes_x_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the x axis. If `float`, then it sets padding on the
right and left of the graph as a percentage of the curves' width. If
`tuple` or `list`, then it defines the axis limits. If ``None``,
then the limits are set automatically.
axes_y_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the y axis. If `float`, then it sets padding on the
top and bottom of the graph as a percentage of the curves' height.
If `tuple` or `list`, then it defines the axis limits. If ``None``,
then the limits are set automatically.
axes_x_ticks : `list` or `tuple` or ``None``, optional
The ticks of the x axis.
axes_y_ticks : `list` or `tuple` or ``None``, optional
The ticks of the y axis.
figure_size : (`float`, `float`) or ``None``, optional
The size of the figure in inches.
render_grid : `bool`, optional
If ``True``, the grid will be rendered.
grid_line_style : ``{'-', '--', '-.', ':'}``, optional
The style of the grid lines.
grid_line_width : `float`, optional
The width of the grid lines.
Returns
-------
renderer : `menpo.visualize.GraphPlotter`
The renderer object.
"""
from menpo.visualize import plot_curve
# set labels
if stat_type == 'max':
name = 'Maximum'
elif stat_type == 'min':
name = 'Minimum'
elif stat_type == 'mean':
name = 'Mean'
elif stat_type == 'median':
name = 'Median'
else:
raise ValueError('stat_type must be one of {max, min, mean, '
'median}.')
y_label = '{} Displacement'.format(name)
title = '{} Displacement per Iteration'.format(name)
# plot
displacements = self.displacements_stats(stat_type=stat_type)
return plot_curve(
x_axis=list(range(len(displacements))), y_axis=[displacements],
figure_id=figure_id, new_figure=new_figure, title=title,
x_label='Iteration', y_label=y_label,
axes_x_limits=axes_x_limits, axes_y_limits=axes_y_limits,
axes_x_ticks=axes_x_ticks, axes_y_ticks=axes_y_ticks,
render_lines=render_lines, line_colour=line_colour,
line_style=line_style, line_width=line_width,
render_markers=render_markers, marker_style=marker_style,
marker_size=marker_size, marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width, render_legend=False,
render_axes=render_axes, axes_font_name=axes_font_name,
axes_font_size=axes_font_size, axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight, figure_size=figure_size,
render_grid=render_grid, grid_line_style=grid_line_style,
grid_line_width=grid_line_width)
@property
def costs(self):
r"""
Returns a `list` with the cost per iteration. It returns ``None`` if
the costs are not computed.
:type: `list` of `float` or ``None``
"""
return self._costs
def plot_costs(self, figure_id=None, new_figure=False, render_lines=True,
line_colour='b', line_style='-', line_width=2,
render_markers=True, marker_style='o', marker_size=4,
marker_face_colour='b', marker_edge_colour='k',
marker_edge_width=1., render_axes=True,
axes_font_name='sans-serif', axes_font_size=10,
axes_font_style='normal', axes_font_weight='normal',
axes_x_limits=0., axes_y_limits=None, axes_x_ticks=None,
axes_y_ticks=None, figure_size=(10, 6),
render_grid=True, grid_line_style='--',
grid_line_width=0.5):
r"""
Plot of the cost function evolution at each fitting iteration.
Parameters
----------
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
render_lines : `bool`, optional
If ``True``, the line will be rendered.
line_colour : `colour` or ``None``, optional
The colour of the line. If ``None``, the colour is sampled from
the jet colormap.
Example `colour` options are ::
{'r', 'g', 'b', 'c', 'm', 'k', 'w'}
or
(3, ) ndarray
line_style : ``{'-', '--', '-.', ':'}``, optional
The style of the lines.
line_width : `float`, optional
The width of the lines.
render_markers : `bool`, optional
If ``True``, the markers will be rendered.
marker_style : `marker`, optional
The style of the markers.
Example `marker` options ::
{'.', ',', 'o', 'v', '^', '<', '>', '+', 'x', 'D', 'd', 's',
'p', '*', 'h', 'H', '1', '2', '3', '4', '8'}
marker_size : `int`, optional
The size of the markers in points.
marker_face_colour : `colour` or ``None``, optional
The face (filling) colour of the markers. If ``None``, the colour
is sampled from the jet colormap.
Example `colour` options are ::
{'r', 'g', 'b', 'c', 'm', 'k', 'w'}
or
(3, ) ndarray
marker_edge_colour : `colour` or ``None``, optional
The edge colour of the markers. If ``None``, the colour
is sampled from the jet colormap.
Example `colour` options are ::
{'r', 'g', 'b', 'c', 'm', 'k', 'w'}
or
(3, ) ndarray
marker_edge_width : `float`, optional
The width of the markers' edge.
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : See below, optional
The font of the axes.
Example options ::
{'serif', 'sans-serif', 'cursive', 'fantasy', 'monospace'}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : ``{'normal', 'italic', 'oblique'}``, optional
The font style of the axes.
axes_font_weight : See below, optional
The font weight of the axes.
Example options ::
{'ultralight', 'light', 'normal', 'regular', 'book', 'medium',
'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy',
'extra bold', 'black'}
axes_x_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the x axis. If `float`, then it sets padding on the
right and left of the graph as a percentage of the curves' width. If
`tuple` or `list`, then it defines the axis limits. If ``None``,
then the limits are set automatically.
axes_y_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the y axis. If `float`, then it sets padding on the
top and bottom of the graph as a percentage of the curves' height.
If `tuple` or `list`, then it defines the axis limits. If ``None``,
then the limits are set automatically.
axes_x_ticks : `list` or `tuple` or ``None``, optional
The ticks of the x axis.
axes_y_ticks : `list` or `tuple` or ``None``, optional
The ticks of the y axis.
figure_size : (`float`, `float`) or ``None``, optional
The size of the figure in inches.
render_grid : `bool`, optional
If ``True``, the grid will be rendered.
grid_line_style : ``{'-', '--', '-.', ':'}``, optional
The style of the grid lines.
grid_line_width : `float`, optional
The width of the grid lines.
Returns
-------
renderer : `menpo.visualize.GraphPlotter`
The renderer object.
"""
from menpo.visualize import plot_curve
costs = self.costs
if costs is not None:
return plot_curve(
x_axis=list(range(len(costs))), y_axis=[costs],
figure_id=figure_id, new_figure=new_figure,
title='Cost per Iteration', x_label='Iteration',
y_label='Cost Function', axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits, axes_x_ticks=axes_x_ticks,
axes_y_ticks=axes_y_ticks, render_lines=render_lines,
line_colour=line_colour, line_style=line_style,
line_width=line_width, render_markers=render_markers,
marker_style=marker_style, marker_size=marker_size,
marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width, render_legend=False,
render_axes=render_axes, axes_font_name=axes_font_name,
axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight, figure_size=figure_size,
render_grid=render_grid, grid_line_style=grid_line_style,
grid_line_width=grid_line_width)
else:
raise ValueError('costs are either not returned or not well '
'defined for the selected fitting algorithm')
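# Usage sketch: costs are algorithm-dependent and may be None, so guard
# before plotting (illustrative):
#
#     if result.costs is not None:
#         result.plot_costs(render_grid=False)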
def view_iterations(self, figure_id=None, new_figure=False,
iters=None, render_image=True, subplots_enabled=False,
channels=None, interpolation='bilinear',
cmap_name=None, alpha=1., masked=True, render_lines=True,
line_style='-', line_width=2, line_colour=None,
render_markers=True, marker_edge_colour=None,
marker_face_colour=None, marker_style='o',
marker_size=4, marker_edge_width=1.,
render_numbering=False,
numbers_horizontal_align='center',
numbers_vertical_align='bottom',
numbers_font_name='sans-serif', numbers_font_size=10,
numbers_font_style='normal',
numbers_font_weight='normal',
numbers_font_colour='k', render_legend=True,
legend_title='', legend_font_name='sans-serif',
legend_font_style='normal', legend_font_size=10,
legend_font_weight='normal', legend_marker_scale=None,
legend_location=2, legend_bbox_to_anchor=(1.05, 1.),
legend_border_axes_pad=None, legend_n_columns=1,
legend_horizontal_spacing=None,
legend_vertical_spacing=None, legend_border=True,
legend_border_padding=None, legend_shadow=False,
legend_rounded_corners=False, render_axes=False,
axes_font_name='sans-serif', axes_font_size=10,
axes_font_style='normal', axes_font_weight='normal',
axes_x_limits=None, axes_y_limits=None,
axes_x_ticks=None, axes_y_ticks=None,
figure_size=(10, 8)):
"""
Visualize the iterations of the fitting process.
Parameters
----------
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
iters : `int` or `list` of `int` or ``None``, optional
The iterations to be visualized. If ``None``, then all the
iterations are rendered.
======= ==================== =============
No. Visualised shape Description
======= ==================== =============
0 `self.initial_shape` Initial shape
1 `self.shapes[1]` Iteration 1
i `self.shapes[i]` Iteration i
n_iters `self.final_shape` Final shape
======= ==================== =============
render_image : `bool`, optional
If ``True`` and the image exists, then it gets rendered.
subplots_enabled : `bool`, optional
If ``True``, then the requested final, initial and ground truth
shapes get rendered on separate subplots.
channels : `int` or `list` of `int` or ``all`` or ``None``
If `int` or `list` of `int`, the specified channel(s) will be
rendered. If ``all``, all the channels will be rendered in subplots.
If ``None`` and the image is RGB, it will be rendered in RGB mode.
If ``None`` and the image is not RGB, it is equivalent to ``all``.
interpolation : `str` (See Below), optional
The interpolation used to render the image. For example, if
``bilinear``, the image will be smooth and if ``nearest``, the
image will be pixelated.
Example options ::
{none, nearest, bilinear, bicubic, spline16, spline36, hanning,
hamming, hermite, kaiser, quadric, catrom, gaussian, bessel,
mitchell, sinc, lanczos}
cmap_name : `str`, optional
If ``None``, single channel and three channel images default
to greyscale and rgb colormaps respectively.
alpha : `float`, optional
The alpha blending value, between 0 (transparent) and 1 (opaque).
masked : `bool`, optional
If ``True``, then the image is rendered as masked.
render_lines : `bool` or `list` of `bool`, optional
If ``True``, the lines will be rendered. You can either provide a
single value that will be used for all shapes or a list with a
different value per iteration shape.
line_style : `str` or `list` of `str` (See below), optional
The style of the lines. You can either provide a single value that
will be used for all shapes or a list with a different value per
iteration shape.
Example options::
{-, --, -., :}
line_width : `float` or `list` of `float`, optional
The width of the lines. You can either provide a single value that
will be used for all shapes or a list with a different value per
iteration shape.
line_colour : `colour` or `list` of `colour` (See Below), optional
The colour of the lines. You can either provide a single value that
will be used for all shapes or a list with a different value per
iteration shape.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
render_markers : `bool` or `list` of `bool`, optional
If ``True``, the markers will be rendered. You can either provide a
single value that will be used for all shapes or a list with a
different value per iteration shape.
marker_style : `str` or `list` of `str` (See below), optional
The style of the markers. You can either provide a single value that
will be used for all shapes or a list with a different value per
iteration shape.
Example options ::
{., ,, o, v, ^, <, >, +, x, D, d, s, p, *, h, H, 1, 2, 3, 4, 8}
marker_size : `int` or `list` of `int`, optional
The size of the markers in points. You can either provide a single
value that will be used for all shapes or a list with a different
value per iteration shape.
marker_edge_colour : `colour` or `list` of `colour` (See Below), optional
The edge colour of the markers. You can either provide a single
value that will be used for all shapes or a list with a different
value per iteration shape.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_face_colour : `colour` or `list` of `colour` (See Below), optional
The face (filling) colour of the markers. You can either provide a
single value that will be used for all shapes or a list with a
different value per iteration shape.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_width : `float` or `list` of `float`, optional
The width of the markers' edge. You can either provide a single
value that will be used for all shapes or a list with a different
value per iteration shape.
render_numbering : `bool`, optional
If ``True``, the landmarks will be numbered.
numbers_horizontal_align : `str` (See below), optional
The horizontal alignment of the numbers' texts.
Example options ::
{center, right, left}
numbers_vertical_align : `str` (See below), optional
The vertical alignment of the numbers' texts.
Example options ::
{center, top, bottom, baseline}
numbers_font_name : `str` (See below), optional
The font of the numbers.
Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
numbers_font_size : `int`, optional
The font size of the numbers.
numbers_font_style : ``{normal, italic, oblique}``, optional
The font style of the numbers.
numbers_font_weight : `str` (See below), optional
The font weight of the numbers.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
numbers_font_colour : See Below, optional
The font colour of the numbers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
render_legend : `bool`, optional
If ``True``, the legend will be rendered.
legend_title : `str`, optional
The title of the legend.
legend_font_name : See below, optional
The font of the legend.
Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
legend_font_style : `str` (See below), optional
The font style of the legend.
Example options ::
{normal, italic, oblique}
legend_font_size : `int`, optional
The font size of the legend.
legend_font_weight : `str` (See below), optional
The font weight of the legend.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
legend_marker_scale : `float`, optional
The relative size of the legend markers with respect to the original markers.
legend_location : `int`, optional
The location of the legend. The predefined values are:
=============== ==
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== ==
legend_bbox_to_anchor : (`float`, `float`) `tuple`, optional
The bbox to which the legend will be anchored.
legend_border_axes_pad : `float`, optional
The pad between the axes and legend border.
legend_n_columns : `int`, optional
The number of the legend's columns.
legend_horizontal_spacing : `float`, optional
The spacing between the columns.
legend_vertical_spacing : `float`, optional
The vertical space between the legend entries.
legend_border : `bool`, optional
If ``True``, a frame will be drawn around the legend.
legend_border_padding : `float`, optional
The fractional whitespace inside the legend border.
legend_shadow : `bool`, optional
If ``True``, a shadow will be drawn behind the legend.
legend_rounded_corners : `bool`, optional
If ``True``, the frame's corners will be rounded (fancybox).
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : `str` (See below), optional
The font of the axes.
Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : ``{normal, italic, oblique}``, optional
The font style of the axes.
axes_font_weight : `str` (See below), optional
The font weight of the axes.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
axes_x_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the x axis. If `float`, then it sets padding on the
right and left of the Image as a percentage of the Image's width. If
`tuple` or `list`, then it defines the axis limits. If ``None``,
then the limits are set automatically.
axes_y_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the y axis. If `float`, then it sets padding on the
top and bottom of the Image as a percentage of the Image's height.
If `tuple` or `list`, then it defines the axis limits. If ``None``,
then the limits are set automatically.
axes_x_ticks : `list` or `tuple` or ``None``, optional
The ticks of the x axis.
axes_y_ticks : `list` or `tuple` or ``None``, optional
The ticks of the y axis.
figure_size : (`float`, `float`) `tuple` or ``None``, optional
The size of the figure in inches.
Returns
-------
renderer : `class`
The renderer object.
"""
# Parse iters
iters = _parse_iters(iters, len(self.shapes))
# Create image instance
if self.image is None:
image = Image(np.zeros((10, 10)))
render_image = False
else:
image = Image(self.image.pixels)
# Assign pointclouds to image
n_digits = len(str(self.n_iters))
groups = []
subplots_titles = {}
iters_offset = 1
if self.initial_shape is not None:
iters_offset = 0
for j in iters:
if j == 0 and self.initial_shape is not None:
name = 'Initial'
image.landmarks[name] = self.initial_shape
elif j == len(self.shapes) - 1:
name = 'Final'
image.landmarks[name] = self.final_shape
else:
name = "iteration {:0{}d}".format(j + iters_offset, n_digits)
image.landmarks[name] = self.shapes[j]
groups.append(name)
subplots_titles[name] = name
# Render
return view_image_multiple_landmarks(
image, groups, with_labels=None, figure_id=figure_id,
new_figure=new_figure, subplots_enabled=subplots_enabled,
subplots_titles=subplots_titles, render_image=render_image,
render_landmarks=True, masked=masked,
channels=channels, interpolation=interpolation,
cmap_name=cmap_name, alpha=alpha, image_view=True,
render_lines=render_lines, line_style=line_style,
line_width=line_width, line_colour=line_colour,
render_markers=render_markers, marker_style=marker_style,
marker_size=marker_size, marker_edge_width=marker_edge_width,
marker_edge_colour=marker_edge_colour,
marker_face_colour=marker_face_colour,
render_numbering=render_numbering,
numbers_horizontal_align=numbers_horizontal_align,
numbers_vertical_align=numbers_vertical_align,
numbers_font_name=numbers_font_name,
numbers_font_size=numbers_font_size,
numbers_font_style=numbers_font_style,
numbers_font_weight=numbers_font_weight,
numbers_font_colour=numbers_font_colour,
render_legend=render_legend, legend_title=legend_title,
legend_font_name=legend_font_name,
legend_font_style=legend_font_style,
legend_font_size=legend_font_size,
legend_font_weight=legend_font_weight,
legend_marker_scale=legend_marker_scale,
legend_location=legend_location,
legend_bbox_to_anchor=legend_bbox_to_anchor,
legend_border_axes_pad=legend_border_axes_pad,
legend_n_columns=legend_n_columns,
legend_horizontal_spacing=legend_horizontal_spacing,
legend_vertical_spacing=legend_vertical_spacing,
legend_border=legend_border,
legend_border_padding=legend_border_padding,
legend_shadow=legend_shadow,
legend_rounded_corners=legend_rounded_corners,
render_axes=render_axes, axes_font_name=axes_font_name,
axes_font_size=axes_font_size, axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight, axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits, axes_x_ticks=axes_x_ticks,
axes_y_ticks=axes_y_ticks, figure_size=figure_size)
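# Usage sketch: render only the first and last visualised shapes on
# separate subplots. Assuming an initial shape was set, index 0 is the
# initial shape and index n_iters is the final shape (see the table in
# the docstring above):
#
#     result.view_iterations(iters=[0, result.n_iters],
#                            subplots_enabled=True)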
class ParametricIterativeResult(NonParametricIterativeResult):
r"""
Class for defining a parametric iterative fitting result, i.e. the
result of a method that optimizes the parameters of a shape model. It
holds the shapes and shape parameters of all the iterations of the
fitting procedure. It can optionally store the image on which the
fitting was applied, as well as its ground truth shape.
.. note:: When using a method with a parametric shape model, the first step
is to **reconstruct the initial shape** using the shape model. The
generated reconstructed shape is then used as initialisation for
the iterative optimisation. This step is not counted in the number
of iterations.
Parameters
----------
shapes : `list` of `menpo.shape.PointCloud`
The `list` of shapes per iteration. Note that the list does not
include the initial shape. However, it includes the reconstruction of
the initial shape. The last member of the list is the final shape.
shape_parameters : `list` of `ndarray`
The `list` of shape parameters per iteration. Note that the list
includes the parameters of the projection of the initial shape. The last
member of the list corresponds to the final shape's parameters. It must
have the same length as `shapes`.
initial_shape : `menpo.shape.PointCloud` or ``None``, optional
The initial shape from which the fitting process started. If
``None``, then no initial shape is assigned.
image : `menpo.image.Image` or `subclass` or ``None``, optional
The image on which the fitting process was applied. Note that a copy
of the image will be assigned as an attribute. If ``None``, then no
image is assigned.
gt_shape : `menpo.shape.PointCloud` or ``None``, optional
The ground truth shape associated with the image. If ``None``, then
no ground truth shape is assigned.
costs : `list` of `float` or ``None``, optional
The `list` of cost per iteration. If ``None``, then it is assumed that
the cost function cannot be computed for the specific algorithm. It must
have the same length as `shapes`.
"""
def __init__(self, shapes, shape_parameters, initial_shape=None, image=None,
gt_shape=None, costs=None):
# Assign shape parameters
self._shape_parameters = shape_parameters
# Get reconstructed initial shape
self._reconstructed_initial_shape = shapes[0]
# Call superclass
super(ParametricIterativeResult, self).__init__(
shapes=shapes, initial_shape=initial_shape, image=image,
gt_shape=gt_shape, costs=costs)
# Correct n_iters. The initial shape's reconstruction should not count
# in the number of iterations.
self._n_iters -= 1
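# Layout sketch of self._shapes after construction, assuming an initial
# shape was given:
#
#     self._shapes[0]  -> initial shape
#     self._shapes[1]  -> reconstruction of the initial shape
#     self._shapes[2:] -> one shape per iteration (last is the final shape)
#
# The reconstruction step is deliberately excluded from n_iters above.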
@property
def shapes(self):
r"""
Returns the `list` of shapes obtained at each iteration of the fitting
process. The `list` includes the `initial_shape` (if it exists),
`reconstructed_initial_shape` and `final_shape`.
:type: `list` of `menpo.shape.PointCloud`
"""
return self._shapes
@property
def shape_parameters(self):
r"""
Returns the `list` of shape parameters obtained at each iteration of
the fitting process. The `list` includes the parameters of the
`reconstructed_initial_shape` and `final_shape`.
:type: `list` of ``(n_params,)`` `ndarray`
"""
return self._shape_parameters
@property
def reconstructed_initial_shape(self):
r"""
Returns the initial shape's reconstruction with the shape model that was
used to initialise the iterative optimisation process.
:type: `menpo.shape.PointCloud`
"""
if self.initial_shape is not None:
return self.shapes[1]
else:
return self.shapes[0]
@property
def _reconstruction_indices(self):
r"""
Returns a list with the indices of reconstructed shapes in the `shapes`
list.
:type: `list` of `int`
"""
if self.initial_shape is not None:
return [1]
else:
return [0]
def reconstructed_initial_error(self, compute_error=None):
r"""
Returns the error of the reconstructed initial shape of the fitting
process, if the ground truth shape exists. This is the error computed
based on the `reconstructed_initial_shape`.
Parameters
----------
compute_error: `callable`, optional
Callable that computes the error between the reconstructed initial
and ground truth shapes.
Returns
-------
reconstructed_initial_error : `float`
The error that corresponds to the initial shape's reconstruction.
Raises
------
ValueError
Ground truth shape has not been set, so the reconstructed initial
error cannot be computed
"""
if compute_error is None:
compute_error = euclidean_bb_normalised_error
if self.gt_shape is None:
raise ValueError('Ground truth shape has not been set, so the '
'reconstructed initial error cannot be computed')
else:
return compute_error(self.reconstructed_initial_shape, self.gt_shape)
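# Usage sketch: comparing the three errors shows how much of the
# improvement comes from the shape model's projection alone
# (illustrative):
#
#     print(result.initial_error(),
#           result.reconstructed_initial_error(),
#           result.final_error())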
def view_iterations(self, figure_id=None, new_figure=False,
iters=None, render_image=True, subplots_enabled=False,
channels=None, interpolation='bilinear',
cmap_name=None, alpha=1., masked=True, render_lines=True,
line_style='-', line_width=2, line_colour=None,
render_markers=True, marker_edge_colour=None,
marker_face_colour=None, marker_style='o',
marker_size=4, marker_edge_width=1.,
render_numbering=False,
numbers_horizontal_align='center',
numbers_vertical_align='bottom',
numbers_font_name='sans-serif', numbers_font_size=10,
numbers_font_style='normal',
numbers_font_weight='normal',
numbers_font_colour='k', render_legend=True,
legend_title='', legend_font_name='sans-serif',
legend_font_style='normal', legend_font_size=10,
legend_font_weight='normal', legend_marker_scale=None,
legend_location=2, legend_bbox_to_anchor=(1.05, 1.),
legend_border_axes_pad=None, legend_n_columns=1,
legend_horizontal_spacing=None,
legend_vertical_spacing=None, legend_border=True,
legend_border_padding=None, legend_shadow=False,
legend_rounded_corners=False, render_axes=False,
axes_font_name='sans-serif', axes_font_size=10,
axes_font_style='normal', axes_font_weight='normal',
axes_x_limits=None, axes_y_limits=None,
axes_x_ticks=None, axes_y_ticks=None,
figure_size=(10, 8)):
"""
Visualize the iterations of the fitting process.
Parameters
----------
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
iters : `int` or `list` of `int` or ``None``, optional
The iterations to be visualized. If ``None``, then all the
iterations are rendered.
========= ==================================== ======================
No. Visualised shape Description
========= ==================================== ======================
0 `self.initial_shape` Initial shape
1 `self.reconstructed_initial_shape` Reconstructed initial
2 `self.shapes[2]` Iteration 1
i `self.shapes[i]` Iteration i-1
n_iters+1 `self.final_shape` Final shape
========= ==================================== ======================
render_image : `bool`, optional
If ``True`` and the image exists, then it gets rendered.
subplots_enabled : `bool`, optional
If ``True``, then the requested final, initial and ground truth
shapes get rendered on separate subplots.
channels : `int` or `list` of `int` or ``all`` or ``None``
If `int` or `list` of `int`, the specified channel(s) will be
rendered. If ``all``, all the channels will be rendered in subplots.
If ``None`` and the image is RGB, it will be rendered in RGB mode.
If ``None`` and the image is not RGB, it is equivalent to ``all``.
interpolation : `str` (See Below), optional
The interpolation used to render the image. For example, if
``bilinear``, the image will be smooth and if ``nearest``, the
image will be pixelated.
Example options ::
{none, nearest, bilinear, bicubic, spline16, spline36, hanning,
hamming, hermite, kaiser, quadric, catrom, gaussian, bessel,
mitchell, sinc, lanczos}
cmap_name : `str`, optional
If ``None``, single channel and three channel images default
to greyscale and rgb colormaps respectively.
alpha : `float`, optional
The alpha blending value, between 0 (transparent) and 1 (opaque).
masked : `bool`, optional
If ``True``, then the image is rendered as masked.
render_lines : `bool` or `list` of `bool`, optional
If ``True``, the lines will be rendered. You can either provide a
single value that will be used for all shapes or a list with a
different value per iteration shape.
line_style : `str` or `list` of `str` (See below), optional
The style of the lines. You can either provide a single value that
will be used for all shapes or a list with a different value per
iteration shape.
Example options::
{-, --, -., :}
line_width : `float` or `list` of `float`, optional
The width of the lines. You can either provide a single value that
will be used for all shapes or a list with a different value per
iteration shape.
line_colour : `colour` or `list` of `colour` (See Below), optional
The colour of the lines. You can either provide a single value that
will be used for all shapes or a list with a different value per
iteration shape.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
render_markers : `bool` or `list` of `bool`, optional
If ``True``, the markers will be rendered. You can either provide a
single value that will be used for all shapes or a list with a
different value per iteration shape.
marker_style : `str` or `list` of `str` (See below), optional
The style of the markers. You can either provide a single value that
will be used for all shapes or a list with a different value per
iteration shape.
Example options ::
{., ,, o, v, ^, <, >, +, x, D, d, s, p, *, h, H, 1, 2, 3, 4, 8}
marker_size : `int` or `list` of `int`, optional
The size of the markers in points. You can either provide a single
value that will be used for all shapes or a list with a different
value per iteration shape.
marker_edge_colour : `colour` or `list` of `colour` (See Below), optional
The edge colour of the markers. You can either provide a single
value that will be used for all shapes or a list with a different
value per iteration shape.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_face_colour : `colour` or `list` of `colour` (See Below), optional
The face (filling) colour of the markers. You can either provide a
single value that will be used for all shapes or a list with a
different value per iteration shape.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_width : `float` or `list` of `float`, optional
The width of the markers' edge. You can either provide a single
value that will be used for all shapes or a list with a different
value per iteration shape.
render_numbering : `bool`, optional
If ``True``, the landmarks will be numbered.
numbers_horizontal_align : `str` (See below), optional
The horizontal alignment of the numbers' texts.
Example options ::
{center, right, left}
numbers_vertical_align : `str` (See below), optional
The vertical alignment of the numbers' texts.
Example options ::
{center, top, bottom, baseline}
numbers_font_name : `str` (See below), optional
The font of the numbers.
Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
numbers_font_size : `int`, optional
The font size of the numbers.
numbers_font_style : ``{normal, italic, oblique}``, optional
The font style of the numbers.
numbers_font_weight : `str` (See below), optional
The font weight of the numbers.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
numbers_font_colour : See Below, optional
The font colour of the numbers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
render_legend : `bool`, optional
If ``True``, the legend will be rendered.
legend_title : `str`, optional
The title of the legend.
legend_font_name : See below, optional
The font of the legend.
Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
legend_font_style : `str` (See below), optional
The font style of the legend.
Example options ::
{normal, italic, oblique}
legend_font_size : `int`, optional
The font size of the legend.
legend_font_weight : `str` (See below), optional
The font weight of the legend.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
legend_marker_scale : `float`, optional
The relative size of the legend markers with respect to the original markers.
legend_location : `int`, optional
The location of the legend. The predefined values are:
=============== ==
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== ==
legend_bbox_to_anchor : (`float`, `float`) `tuple`, optional
The bbox to which the legend will be anchored.
legend_border_axes_pad : `float`, optional
The pad between the axes and legend border.
legend_n_columns : `int`, optional
The number of the legend's columns.
legend_horizontal_spacing : `float`, optional
The spacing between the columns.
legend_vertical_spacing : `float`, optional
The vertical space between the legend entries.
legend_border : `bool`, optional
If ``True``, a frame will be drawn around the legend.
legend_border_padding : `float`, optional
The fractional whitespace inside the legend border.
legend_shadow : `bool`, optional
If ``True``, a shadow will be drawn behind the legend.
legend_rounded_corners : `bool`, optional
If ``True``, the frame's corners will be rounded (fancybox).
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : `str` (See below), optional
The font of the axes.
Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : ``{normal, italic, oblique}``, optional
The font style of the axes.
axes_font_weight : `str` (See below), optional
The font weight of the axes.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
axes_x_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the x axis. If `float`, then it sets padding on the
right and left of the Image as a percentage of the Image's width. If
`tuple` or `list`, then it defines the axis limits. If ``None``,
then the limits are set automatically.
axes_y_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the y axis. If `float`, then it sets padding on the
top and bottom of the Image as a percentage of the Image's height.
If `tuple` or `list`, then it defines the axis limits. If ``None``,
then the limits are set automatically.
axes_x_ticks : `list` or `tuple` or ``None``, optional
The ticks of the x axis.
axes_y_ticks : `list` or `tuple` or ``None``, optional
The ticks of the y axis.
figure_size : (`float`, `float`) `tuple` or ``None``, optional
The size of the figure in inches.
Returns
-------
renderer : `class`
The renderer object.
"""
# Parse iters
iters = _parse_iters(iters, len(self.shapes))
# Create image instance
if self.image is None:
image = Image(np.zeros((10, 10)))
render_image = False
else:
image = Image(self.image.pixels)
# Assign pointclouds to image
n_digits = len(str(self.n_iters))
groups = []
subplots_titles = {}
iters_offset = 0
if self.initial_shape is not None:
iters_offset = 1
for j in iters:
if j == 0 and self.initial_shape is not None:
name = 'Initial'
image.landmarks[name] = self.initial_shape
elif j in self._reconstruction_indices:
name = 'Reconstruction'
image.landmarks[name] = self.shapes[j]
elif j == len(self.shapes) - 1:
name = 'Final'
image.landmarks[name] = self.final_shape
else:
s = _get_scale_of_iter(j, self._reconstruction_indices)
name = "iteration {:0{}d}".format(j - s + iters_offset, n_digits)
image.landmarks[name] = self.shapes[j]
groups.append(name)
subplots_titles[name] = name
# Render
return view_image_multiple_landmarks(
image, groups, with_labels=None, figure_id=figure_id,
new_figure=new_figure, subplots_enabled=subplots_enabled,
subplots_titles=subplots_titles, render_image=render_image,
render_landmarks=True, masked=masked,
channels=channels, interpolation=interpolation,
cmap_name=cmap_name, alpha=alpha, image_view=True,
render_lines=render_lines, line_style=line_style,
line_width=line_width, line_colour=line_colour,
render_markers=render_markers, marker_style=marker_style,
marker_size=marker_size, marker_edge_width=marker_edge_width,
marker_edge_colour=marker_edge_colour,
marker_face_colour=marker_face_colour,
render_numbering=render_numbering,
numbers_horizontal_align=numbers_horizontal_align,
numbers_vertical_align=numbers_vertical_align,
numbers_font_name=numbers_font_name,
numbers_font_size=numbers_font_size,
numbers_font_style=numbers_font_style,
numbers_font_weight=numbers_font_weight,
numbers_font_colour=numbers_font_colour,
render_legend=render_legend, legend_title=legend_title,
legend_font_name=legend_font_name,
legend_font_style=legend_font_style,
legend_font_size=legend_font_size,
legend_font_weight=legend_font_weight,
legend_marker_scale=legend_marker_scale,
legend_location=legend_location,
legend_bbox_to_anchor=legend_bbox_to_anchor,
legend_border_axes_pad=legend_border_axes_pad,
legend_n_columns=legend_n_columns,
legend_horizontal_spacing=legend_horizontal_spacing,
legend_vertical_spacing=legend_vertical_spacing,
legend_border=legend_border,
legend_border_padding=legend_border_padding,
legend_shadow=legend_shadow,
legend_rounded_corners=legend_rounded_corners,
render_axes=render_axes, axes_font_name=axes_font_name,
axes_font_size=axes_font_size, axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight, axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits, axes_x_ticks=axes_x_ticks,
axes_y_ticks=axes_y_ticks, figure_size=figure_size)
class MultiScaleNonParametricIterativeResult(NonParametricIterativeResult):
r"""
Class for defining a multi-scale non-parametric iterative fitting result,
i.e. the result of a multi-scale method that does not optimise over a
parametric shape model. It holds the shapes of all the iterations of
the fitting procedure, as well as the scales. It can optionally store the
image on which the fitting was applied, as well as its ground truth shape.
Parameters
----------
results : `list` of :map:`NonParametricIterativeResult`
The `list` of non parametric iterative results per scale.
scales : `list` of `float`
The scale values (normally from low to high).
affine_transforms : `list` of `menpo.transform.Affine`
The list of affine transforms per scale that transform the shapes into
the original image space.
scale_transforms : `list` of `menpo.shape.Scale`
The list of scaling transforms per scale.
image : `menpo.image.Image` or `subclass` or ``None``, optional
The image on which the fitting process was applied. Note that a copy
of the image will be assigned as an attribute. If ``None``, then no
image is assigned.
gt_shape : `menpo.shape.PointCloud` or ``None``, optional
The ground truth shape associated with the image. If ``None``, then no
ground truth shape is assigned.
"""
def __init__(self, results, scales, affine_transforms, scale_transforms,
image=None, gt_shape=None):
# Make sure results and scales are iterable
if not isinstance(results, Iterable):
results = [results]
if not isinstance(scales, Iterable):
scales = [scales]
# Check that results and scales have the same length
if len(results) != len(scales):
raise ValueError('results and scales must have equal length ({} '
'!= {})'.format(len(results), len(scales)))
# Get initial shape
initial_shape = None
if results[0].initial_shape is not None:
initial_shape = _rescale_shapes_to_reference(
shapes=[results[0].initial_shape],
affine_transform=affine_transforms[0],
scale_transform=scale_transforms[0])[0]
# Create shapes list and n_iters_per_scale
# If the result object has an initial shape, then it has to be
# removed from the final shapes list
n_iters_per_scale = []
shapes = []
for i in list(range(len(scales))):
n_iters_per_scale.append(results[i].n_iters)
if results[i].initial_shape is None:
shapes += _rescale_shapes_to_reference(
shapes=results[i].shapes,
affine_transform=affine_transforms[i],
scale_transform=scale_transforms[i])
else:
shapes += _rescale_shapes_to_reference(
shapes=results[i].shapes[1:],
affine_transform=affine_transforms[i],
scale_transform=scale_transforms[i])
# Call superclass
super(MultiScaleNonParametricIterativeResult, self).__init__(
shapes=shapes, initial_shape=initial_shape, image=image,
gt_shape=gt_shape)
# Get attributes
self._n_iters_per_scale = n_iters_per_scale
self._n_scales = len(scales)
# Create costs list. We assume that if the costs of the first result
# object is None, then the costs property of all objects is None.
        # Similarly, if the costs property of the first object is not
# None, then the same stands for the rest.
self._costs = None
if results[0].costs is not None:
self._costs = []
for r in results:
self._costs += r.costs
@property
def n_iters_per_scale(self):
r"""
Returns the number of iterations per scale of the fitting process.
:type: `list` of `int`
"""
return self._n_iters_per_scale
@property
def n_scales(self):
r"""
Returns the number of scales used during the fitting process.
:type: `int`
"""
return self._n_scales
class MultiScaleParametricIterativeResult(MultiScaleNonParametricIterativeResult):
r"""
Class for defining a multi-scale parametric iterative fitting result, i.e.
the result of a multi-scale method that optimizes over a parametric shape
model. It holds the shapes of all the iterations of the fitting procedure,
as well as the scales. It can optionally store the image on which the
fitting was applied, as well as its ground truth shape.
.. note:: When using a method with a parametric shape model, the first step
is to **reconstruct the initial shape** using the shape model. The
generated reconstructed shape is then used as initialisation for
the iterative optimisation. This step is not counted in the number
of iterations.
Parameters
----------
results : `list` of :map:`ParametricIterativeResult`
The `list` of parametric iterative results per scale.
scales : `list` of `float`
The scale values (normally small to high).
affine_transforms : `list` of `menpo.transform.Affine`
The list of affine transforms per scale that transform the shapes into
the original image space.
scale_transforms : `list` of `menpo.shape.Scale`
The list of scaling transforms per scale.
image : `menpo.image.Image` or `subclass` or ``None``, optional
The image on which the fitting process was applied. Note that a copy
of the image will be assigned as an attribute. If ``None``, then no
image is assigned.
gt_shape : `menpo.shape.PointCloud` or ``None``, optional
The ground truth shape associated with the image. If ``None``, then no
ground truth shape is assigned.
"""
def __init__(self, results, scales, affine_transforms, scale_transforms,
image=None, gt_shape=None):
# Call superclass
super(MultiScaleParametricIterativeResult, self).__init__(
results=results, scales=scales, affine_transforms=affine_transforms,
scale_transforms=scale_transforms, image=image, gt_shape=gt_shape)
# Create shape parameters, and reconstructed initial shapes lists
self._shape_parameters = []
for r in results:
self._shape_parameters += r.shape_parameters
# Correct n_iters
self._n_iters -= len(scales)
@property
def shape_parameters(self):
r"""
Returns the `list` of shape parameters obtained at each iteration of
the fitting process. The `list` includes the parameters of the
`initial_shape` (if it exists) and `final_shape`.
:type: `list` of ``(n_params,)`` `ndarray`
"""
return self._shape_parameters
@property
def reconstructed_initial_shapes(self):
r"""
Returns the result of the reconstruction step that takes place at each
scale before applying the iterative optimisation.
:type: `list` of `menpo.shape.PointCloud`
"""
ids = self._reconstruction_indices
return [self.shapes[i] for i in ids]
@property
def _reconstruction_indices(self):
r"""
Returns a list with the indices of reconstructed shapes in the `shapes`
list.
:type: `list` of `int`
"""
initial_val = 0
if self.initial_shape is not None:
initial_val = 1
ids = []
for i in list(range(self.n_scales)):
if i == 0:
ids.append(initial_val)
else:
previous_val = ids[i - 1]
ids.append(previous_val + self.n_iters_per_scale[i - 1] + 1)
return ids
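    # Worked example (an illustrative sketch, not part of the original
    # source): with an initial shape, n_scales == 2 and
    # n_iters_per_scale == [10, 10], the loop above yields ids[0] = 1 and
    # ids[1] = 1 + 10 + 1 = 12, i.e. the reconstructed shapes live at
    # self.shapes[1] and self.shapes[12].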
def reconstructed_initial_error(self, compute_error=None):
r"""
Returns the error of the reconstructed initial shape of the fitting
process, if the ground truth shape exists. This is the error computed
based on the `reconstructed_initial_shapes[0]`.
Parameters
----------
compute_error: `callable`, optional
Callable that computes the error between the reconstructed initial
and ground truth shapes.
Returns
-------
reconstructed_initial_error : `float`
The error that corresponds to the initial shape's reconstruction.
Raises
------
ValueError
Ground truth shape has not been set, so the reconstructed initial
error cannot be computed
"""
if compute_error is None:
compute_error = euclidean_bb_normalised_error
if self.gt_shape is None:
raise ValueError('Ground truth shape has not been set, so the '
'reconstructed initial error cannot be computed')
else:
return compute_error(self.reconstructed_initial_shapes[0],
self.gt_shape)
def view_iterations(self, figure_id=None, new_figure=False,
iters=None, render_image=True, subplots_enabled=False,
channels=None, interpolation='bilinear',
cmap_name=None, alpha=1., masked=True, render_lines=True,
line_style='-', line_width=2, line_colour=None,
render_markers=True, marker_edge_colour=None,
marker_face_colour=None, marker_style='o',
marker_size=4, marker_edge_width=1.,
render_numbering=False,
numbers_horizontal_align='center',
numbers_vertical_align='bottom',
numbers_font_name='sans-serif', numbers_font_size=10,
numbers_font_style='normal',
numbers_font_weight='normal',
numbers_font_colour='k', render_legend=True,
legend_title='', legend_font_name='sans-serif',
legend_font_style='normal', legend_font_size=10,
legend_font_weight='normal', legend_marker_scale=None,
legend_location=2, legend_bbox_to_anchor=(1.05, 1.),
legend_border_axes_pad=None, legend_n_columns=1,
legend_horizontal_spacing=None,
legend_vertical_spacing=None, legend_border=True,
legend_border_padding=None, legend_shadow=False,
legend_rounded_corners=False, render_axes=False,
axes_font_name='sans-serif', axes_font_size=10,
axes_font_style='normal', axes_font_weight='normal',
axes_x_limits=None, axes_y_limits=None,
axes_x_ticks=None, axes_y_ticks=None,
figure_size=(10, 8)):
"""
Visualize the iterations of the fitting process.
Parameters
----------
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
iters : `int` or `list` of `int` or ``None``, optional
The iterations to be visualized. If ``None``, then all the
iterations are rendered.
========= ==================================== ======================
No. Visualised shape Description
========= ==================================== ======================
0 `self.initial_shape` Initial shape
1 `self.reconstructed_initial_shape` Reconstructed initial
2 `self.shapes[2]` Iteration 1
i `self.shapes[i]` Iteration i-1
n_iters+1 `self.final_shape` Final shape
========= ==================================== ======================
render_image : `bool`, optional
If ``True`` and the image exists, then it gets rendered.
subplots_enabled : `bool`, optional
If ``True``, then the requested final, initial and ground truth
shapes get rendered on separate subplots.
channels : `int` or `list` of `int` or ``all`` or ``None``
If `int` or `list` of `int`, the specified channel(s) will be
rendered. If ``all``, all the channels will be rendered in subplots.
If ``None`` and the image is RGB, it will be rendered in RGB mode.
If ``None`` and the image is not RGB, it is equivalent to ``all``.
interpolation : `str` (See Below), optional
The interpolation used to render the image. For example, if
``bilinear``, the image will be smooth and if ``nearest``, the
image will be pixelated.
Example options ::
{none, nearest, bilinear, bicubic, spline16, spline36, hanning,
hamming, hermite, kaiser, quadric, catrom, gaussian, bessel,
mitchell, sinc, lanczos}
        cmap_name : `str`, optional
If ``None``, single channel and three channel images default
to greyscale and rgb colormaps respectively.
alpha : `float`, optional
The alpha blending value, between 0 (transparent) and 1 (opaque).
masked : `bool`, optional
If ``True``, then the image is rendered as masked.
render_lines : `bool` or `list` of `bool`, optional
If ``True``, the lines will be rendered. You can either provide a
single value that will be used for all shapes or a list with a
different value per iteration shape.
line_style : `str` or `list` of `str` (See below), optional
The style of the lines. You can either provide a single value that
will be used for all shapes or a list with a different value per
iteration shape.
Example options::
{-, --, -., :}
line_width : `float` or `list` of `float`, optional
The width of the lines. You can either provide a single value that
will be used for all shapes or a list with a different value per
iteration shape.
line_colour : `colour` or `list` of `colour` (See Below), optional
The colour of the lines. You can either provide a single value that
will be used for all shapes or a list with a different value per
iteration shape.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
render_markers : `bool` or `list` of `bool`, optional
If ``True``, the markers will be rendered. You can either provide a
single value that will be used for all shapes or a list with a
different value per iteration shape.
        marker_style : `str` or `list` of `str` (See below), optional
The style of the markers. You can either provide a single value that
will be used for all shapes or a list with a different value per
iteration shape.
Example options ::
{., ,, o, v, ^, <, >, +, x, D, d, s, p, *, h, H, 1, 2, 3, 4, 8}
marker_size : `int` or `list` of `int`, optional
The size of the markers in points. You can either provide a single
value that will be used for all shapes or a list with a different
value per iteration shape.
marker_edge_colour : `colour` or `list` of `colour` (See Below), optional
The edge colour of the markers. You can either provide a single
value that will be used for all shapes or a list with a different
value per iteration shape.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_face_colour : `colour` or `list` of `colour` (See Below), optional
The face (filling) colour of the markers. You can either provide a
single value that will be used for all shapes or a list with a
different value per iteration shape.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_width : `float` or `list` of `float`, optional
The width of the markers' edge. You can either provide a single
value that will be used for all shapes or a list with a different
value per iteration shape.
render_numbering : `bool`, optional
If ``True``, the landmarks will be numbered.
numbers_horizontal_align : `str` (See below), optional
The horizontal alignment of the numbers' texts.
Example options ::
{center, right, left}
numbers_vertical_align : `str` (See below), optional
The vertical alignment of the numbers' texts.
Example options ::
{center, top, bottom, baseline}
numbers_font_name : `str` (See below), optional
The font of the numbers.
Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
numbers_font_size : `int`, optional
The font size of the numbers.
numbers_font_style : ``{normal, italic, oblique}``, optional
The font style of the numbers.
numbers_font_weight : `str` (See below), optional
The font weight of the numbers.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
numbers_font_colour : See Below, optional
The font colour of the numbers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
render_legend : `bool`, optional
If ``True``, the legend will be rendered.
legend_title : `str`, optional
The title of the legend.
legend_font_name : See below, optional
The font of the legend.
Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
legend_font_style : `str` (See below), optional
The font style of the legend.
Example options ::
{normal, italic, oblique}
legend_font_size : `int`, optional
The font size of the legend.
legend_font_weight : `str` (See below), optional
The font weight of the legend.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
legend_marker_scale : `float`, optional
            The relative size of the legend markers with respect to the
            original markers.
legend_location : `int`, optional
The location of the legend. The predefined values are:
=============== ==
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== ==
legend_bbox_to_anchor : (`float`, `float`) `tuple`, optional
The bbox that the legend will be anchored.
legend_border_axes_pad : `float`, optional
The pad between the axes and legend border.
legend_n_columns : `int`, optional
The number of the legend's columns.
legend_horizontal_spacing : `float`, optional
The spacing between the columns.
legend_vertical_spacing : `float`, optional
The vertical space between the legend entries.
legend_border : `bool`, optional
If ``True``, a frame will be drawn around the legend.
legend_border_padding : `float`, optional
The fractional whitespace inside the legend border.
legend_shadow : `bool`, optional
If ``True``, a shadow will be drawn behind legend.
legend_rounded_corners : `bool`, optional
If ``True``, the frame's corners will be rounded (fancybox).
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : `str` (See below), optional
The font of the axes.
Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : ``{normal, italic, oblique}``, optional
The font style of the axes.
axes_font_weight : `str` (See below), optional
The font weight of the axes.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
axes_x_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the x axis. If `float`, then it sets padding on the
right and left of the Image as a percentage of the Image's width. If
`tuple` or `list`, then it defines the axis limits. If ``None``,
then the limits are set automatically.
        axes_y_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the y axis. If `float`, then it sets padding on the
top and bottom of the Image as a percentage of the Image's height.
If `tuple` or `list`, then it defines the axis limits. If ``None``,
then the limits are set automatically.
axes_x_ticks : `list` or `tuple` or ``None``, optional
The ticks of the x axis.
axes_y_ticks : `list` or `tuple` or ``None``, optional
The ticks of the y axis.
        figure_size : (`float`, `float`) `tuple` or ``None``, optional
The size of the figure in inches.
Returns
-------
renderer : `class`
The renderer object.
"""
# Parse iters
iters = _parse_iters(iters, len(self.shapes))
# Create image instance
if self.image is None:
image = Image(np.zeros((10, 10)))
render_image = False
else:
image = Image(self.image.pixels)
# Assign pointclouds to image
n_digits = len(str(self.n_iters))
groups = []
subplots_titles = {}
iters_offset = -2
if self.initial_shape is not None:
iters_offset = -1
for j in iters:
if j == 0 and self.initial_shape is not None:
name = 'Initial'
image.landmarks[name] = self.initial_shape
elif j in self._reconstruction_indices:
name = 'Reconstruction'
image.landmarks[name] = self.shapes[j]
elif j == len(self.shapes) - 1:
name = 'Final'
image.landmarks[name] = self.final_shape
else:
s = _get_scale_of_iter(j, self._reconstruction_indices)
name = "iteration {:0{}d}".format(j - s + iters_offset, n_digits)
image.landmarks[name] = self.shapes[j]
groups.append(name)
subplots_titles[name] = name
# Render
return view_image_multiple_landmarks(
image, groups, with_labels=None, figure_id=figure_id,
new_figure=new_figure, subplots_enabled=subplots_enabled,
subplots_titles=subplots_titles, render_image=render_image,
render_landmarks=True, masked=masked,
channels=channels, interpolation=interpolation,
cmap_name=cmap_name, alpha=alpha, image_view=True,
render_lines=render_lines, line_style=line_style,
line_width=line_width, line_colour=line_colour,
render_markers=render_markers, marker_style=marker_style,
marker_size=marker_size, marker_edge_width=marker_edge_width,
marker_edge_colour=marker_edge_colour,
marker_face_colour=marker_face_colour,
render_numbering=render_numbering,
numbers_horizontal_align=numbers_horizontal_align,
numbers_vertical_align=numbers_vertical_align,
numbers_font_name=numbers_font_name,
numbers_font_size=numbers_font_size,
numbers_font_style=numbers_font_style,
numbers_font_weight=numbers_font_weight,
numbers_font_colour=numbers_font_colour,
render_legend=render_legend, legend_title=legend_title,
legend_font_name=legend_font_name,
legend_font_style=legend_font_style,
legend_font_size=legend_font_size,
legend_font_weight=legend_font_weight,
legend_marker_scale=legend_marker_scale,
legend_location=legend_location,
legend_bbox_to_anchor=legend_bbox_to_anchor,
legend_border_axes_pad=legend_border_axes_pad,
legend_n_columns=legend_n_columns,
legend_horizontal_spacing=legend_horizontal_spacing,
legend_vertical_spacing=legend_vertical_spacing,
legend_border=legend_border,
legend_border_padding=legend_border_padding,
legend_shadow=legend_shadow,
legend_rounded_corners=legend_rounded_corners,
render_axes=render_axes, axes_font_name=axes_font_name,
axes_font_size=axes_font_size, axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight, axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits, axes_x_ticks=axes_x_ticks,
axes_y_ticks=axes_y_ticks, figure_size=figure_size)
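# Usage sketch (an assumption-laden illustration, not part of the original
# module): `fitter` and `image` below are hypothetical menpofit/menpo objects,
# assuming a fitter exposing the usual fit_from_bb interface.
#
#   result = fitter.fit_from_bb(image, bounding_box)
#   result.view_iterations(iters=[0, 5, 10],      # selected iterations only
#                          subplots_enabled=True,
#                          render_legend=True)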
|
yuxiang-zhou/menpofit
|
menpofit/result.py
|
Python
|
bsd-3-clause
| 118,801
|
[
"Gaussian"
] |
41b9bff1dc708d3e29418d411cebaeaf4d1632579ceb35170a9860e077d5be6d
|
# Copyright 2000-2001 by Andrew Dalke.
# Revisions copyright 2008 by Peter Cock.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Standard nucleotide and protein alphabets defined by IUPAC."""
from Bio import Alphabet
from Bio.Data import IUPACData
##################### Protein
# From the IUPAC definition at:
# http://www.chem.qmw.ac.uk/iupac/AminoAcid/A2021.html#AA21
assert IUPACData.extended_protein_letters == IUPACData.extended_protein_letters.upper()
class ExtendedIUPACProtein(Alphabet.ProteinAlphabet):
"""Extended uppercase IUPAC protein single letter alphabet including X etc.
In addition to the standard 20 single letter protein codes, this includes:
B = "Asx"; Aspartic acid (R) or Asparagine (N)
X = "Xxx"; Unknown or 'other' amino acid
Z = "Glx"; Glutamic acid (E) or Glutamine (Q)
J = "Xle"; Leucine (L) or Isoleucine (I), used in mass-spec (NMR)
U = "Sec"; Selenocysteine
O = "Pyl"; Pyrrolysine
This alphabet is not intended to be used with X for Selenocysteine
(an ad-hoc standard prior to the IUPAC adoption of U instead).
"""
letters = IUPACData.extended_protein_letters
extended_protein = ExtendedIUPACProtein()
assert IUPACData.protein_letters == IUPACData.protein_letters.upper()
class IUPACProtein(ExtendedIUPACProtein):
"""Uppercase IUPAC protein single letter alphabet of the 20 standard amino acids."""
letters = IUPACData.protein_letters
protein = IUPACProtein()
##################### DNA
# The next two are the IUPAC definitions, from:
# http://www.chem.qmw.ac.uk/iubmb/misc/naseq.html
class IUPACAmbiguousDNA(Alphabet.DNAAlphabet):
"""Uppercase IUPAC ambiguous DNA."""
letters = IUPACData.ambiguous_dna_letters
ambiguous_dna = IUPACAmbiguousDNA()
class IUPACUnambiguousDNA(IUPACAmbiguousDNA):
"""Uppercase IUPAC unambiguous DNA (letters GATC only)."""
letters = IUPACData.unambiguous_dna_letters
unambiguous_dna = IUPACUnambiguousDNA()
# Also from the URL, but not part of the standard
class ExtendedIUPACDNA(Alphabet.DNAAlphabet):
"""Extended IUPAC DNA alphabet.
In addition to the standard letter codes GATC, this includes:
B = 5-bromouridine
D = 5,6-dihydrouridine
S = thiouridine
W = wyosine
"""
letters = IUPACData.extended_dna_letters
extended_dna = ExtendedIUPACDNA()
##################### RNA
class IUPACAmbiguousRNA(Alphabet.RNAAlphabet):
"""Uppercase IUPAC ambiguous RNA."""
letters = IUPACData.ambiguous_rna_letters
ambiguous_rna = IUPACAmbiguousRNA()
class IUPACUnambiguousRNA(IUPACAmbiguousRNA):
"""Uppercase IUPAC unambiguous RNA (letters GAUC only)."""
letters = IUPACData.unambiguous_rna_letters
unambiguous_rna = IUPACUnambiguousRNA()
# are there extended forms?
#class ExtendedIUPACRNA(Alphabet.RNAAlphabet):
# letters = extended_rna_letters
# # B == 5-bromouridine
# # D == 5,6-dihydrouridine
# # S == thiouridine
# # W == wyosine
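# Usage sketch (not part of the original module; assumes a legacy Biopython
# release, before 1.78, where Bio.Alphabet still exists):
#
#   from Bio.Seq import Seq
#   from Bio.Alphabet import IUPAC
#   seq = Seq("GATCGATC", IUPAC.unambiguous_dna)
#   seq.alphabet.letters  # -> 'GATC'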
|
bryback/quickseq
|
genescript/Bio/Alphabet/IUPAC.py
|
Python
|
mit
| 3,110
|
[
"Biopython"
] |
85af8fce9ab62e81e169277a79fd3b0419727b47d9a8f55dac89f533fb5bc9a9
|
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2004-2007 Donald N. Allingham
# Copyright (C) 2010 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
|
sam-m888/gprime
|
gprime/display/__init__.py
|
Python
|
gpl-2.0
| 844
|
[
"Brian"
] |
baaf690ec3fbbcd715ab97e58019941f0ef1c58a56650bf315638dcfc51f14fe
|
../../../../share/pyshared/orca/phonnames.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/phonnames.py
|
Python
|
gpl-3.0
| 44
|
[
"ORCA"
] |
cc90824c2df6dd5b9de33913ae5365c44d8a5908ae450eae21acaf3641412102
|
"""
Views for user API
"""
import json
from django.shortcuts import redirect
from django.utils import dateparse
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import UsageKey
from rest_framework import generics, views
from rest_framework.decorators import api_view
from rest_framework.response import Response
from xblock.fields import Scope
from xblock.runtime import KeyValueStore
from courseware.access import is_mobile_available_for_user
from courseware.courses import get_current_child
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from courseware.views.index import save_positions_recursively_up
from experiments.models import ExperimentData, ExperimentKeyValue
from student.models import CourseEnrollment, User
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from .. import errors
from ..decorators import mobile_course_access, mobile_view
from .serializers import CourseEnrollmentSerializer, UserSerializer
@mobile_view(is_user=True)
class UserDetail(generics.RetrieveAPIView):
"""
**Use Case**
Get information about the specified user and access other resources
the user has permissions for.
Users are redirected to this endpoint after they sign in.
You can use the **course_enrollments** value in the response to get a
list of courses the user is enrolled in.
**Example Request**
GET /api/mobile/v0.5/users/{username}
**Response Values**
If the request is successful, the request returns an HTTP 200 "OK" response.
The HTTP 200 response has the following values.
* course_enrollments: The URI to list the courses the currently signed
in user is enrolled in.
* email: The email address of the currently signed in user.
* id: The ID of the user.
* name: The full name of the currently signed in user.
* username: The username of the currently signed in user.
"""
queryset = (
User.objects.all().select_related('profile')
)
serializer_class = UserSerializer
lookup_field = 'username'
@mobile_view(is_user=True)
class UserCourseStatus(views.APIView):
"""
**Use Cases**
Get or update the ID of the module that the specified user last
visited in the specified course.
**Example Requests**
GET /api/mobile/v0.5/users/{username}/course_status_info/{course_id}
PATCH /api/mobile/v0.5/users/{username}/course_status_info/{course_id}
**PATCH Parameters**
The body of the PATCH request can include the following parameters.
* last_visited_module_id={module_id}
* modification_date={date}
The modification_date parameter is optional. If it is present, the
update will only take effect if the modification_date in the
request is later than the modification_date saved on the server.
**Response Values**
If the request is successful, the request returns an HTTP 200 "OK" response.
The HTTP 200 response has the following values.
* last_visited_module_id: The ID of the last module that the user
visited in the course.
* last_visited_module_path: The ID of the modules in the path from the
last visited module to the course module.
"""
http_method_names = ["get", "patch"]
def _last_visited_module_path(self, request, course):
"""
Returns the path from the last module visited by the current user in the given course up to
the course module. If there is no such visit, the first item deep enough down the course
tree is used.
"""
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, request.user, course, depth=2)
course_module = get_module_for_descriptor(
request.user, request, course, field_data_cache, course.id, course=course
)
path = [course_module]
chapter = get_current_child(course_module, min_depth=2)
if chapter is not None:
path.append(chapter)
section = get_current_child(chapter, min_depth=1)
if section is not None:
path.append(section)
path.reverse()
return path
def _get_course_info(self, request, course):
"""
Returns the course status
"""
path = self._last_visited_module_path(request, course)
path_ids = [unicode(module.location) for module in path]
return Response({
"last_visited_module_id": path_ids[0],
"last_visited_module_path": path_ids,
})
def _update_last_visited_module_id(self, request, course, module_key, modification_date):
"""
Saves the module id if the found modification_date is less recent than the passed modification date
"""
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, request.user, course, depth=2)
try:
module_descriptor = modulestore().get_item(module_key)
except ItemNotFoundError:
return Response(errors.ERROR_INVALID_MODULE_ID, status=400)
module = get_module_for_descriptor(
request.user, request, module_descriptor, field_data_cache, course.id, course=course
)
if modification_date:
key = KeyValueStore.Key(
scope=Scope.user_state,
user_id=request.user.id,
block_scope_id=course.location,
field_name='position'
)
original_store_date = field_data_cache.last_modified(key)
if original_store_date is not None and modification_date < original_store_date:
# old modification date so skip update
return self._get_course_info(request, course)
save_positions_recursively_up(request.user, request, field_data_cache, module, course=course)
return self._get_course_info(request, course)
@mobile_course_access(depth=2)
def get(self, request, course, *args, **kwargs): # pylint: disable=unused-argument
"""
Get the ID of the module that the specified user last visited in the specified course.
"""
return self._get_course_info(request, course)
@mobile_course_access(depth=2)
def patch(self, request, course, *args, **kwargs): # pylint: disable=unused-argument
"""
Update the ID of the module that the specified user last visited in the specified course.
"""
module_id = request.data.get("last_visited_module_id")
modification_date_string = request.data.get("modification_date")
modification_date = None
if modification_date_string:
modification_date = dateparse.parse_datetime(modification_date_string)
if not modification_date or not modification_date.tzinfo:
return Response(errors.ERROR_INVALID_MODIFICATION_DATE, status=400)
if module_id:
try:
module_key = UsageKey.from_string(module_id)
except InvalidKeyError:
return Response(errors.ERROR_INVALID_MODULE_ID, status=400)
return self._update_last_visited_module_id(request, course, module_key, modification_date)
else:
# The arguments are optional, so if there's no argument just succeed
return self._get_course_info(request, course)
@mobile_view(is_user=True)
class UserCourseEnrollmentsList(generics.ListAPIView):
"""
**Use Case**
Get information about the courses that the currently signed in user is
enrolled in.
**Example Request**
GET /api/mobile/v0.5/users/{username}/course_enrollments/
**Response Values**
If the request for information about the user is successful, the
request returns an HTTP 200 "OK" response.
The HTTP 200 response has the following values.
* certificate: Information about the user's earned certificate in the
course.
* course: A collection of the following data about the course.
* courseware_access: A JSON representation with access information for the course,
including any access errors.
* course_about: The URL to the course about page.
* course_sharing_utm_parameters: Encoded UTM parameters to be included in course sharing url
* course_handouts: The URI to get data for course handouts.
* course_image: The path to the course image.
* course_updates: The URI to get data for course updates.
* discussion_url: The URI to access data for course discussions if
it is enabled, otherwise null.
* end: The end date of the course.
* id: The unique ID of the course.
* name: The name of the course.
* number: The course number.
* org: The organization that created the course.
* start: The date and time when the course starts.
* start_display:
If start_type is a string, then the advertised_start date for the course.
If start_type is a timestamp, then a formatted date for the start of the course.
If start_type is empty, then the value is None and it indicates that the course has not yet started.
* start_type: One of either "string", "timestamp", or "empty"
* subscription_id: A unique "clean" (alphanumeric with '_') ID of
the course.
* video_outline: The URI to get the list of all videos that the user
can access in the course.
* created: The date the course was created.
* is_active: Whether the course is currently active. Possible values
are true or false.
* mode: The type of certificate registration for this course (honor or
certified).
* url: URL to the downloadable version of the certificate, if exists.
"""
queryset = CourseEnrollment.objects.all()
serializer_class = CourseEnrollmentSerializer
lookup_field = 'username'
# In Django Rest Framework v3, there is a default pagination
# class that transmutes the response data into a dictionary
# with pagination information. The original response data (a list)
# is stored in a "results" value of the dictionary.
# For backwards compatibility with the existing API, we disable
# the default behavior by setting the pagination_class to None.
pagination_class = None
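    # Illustration (not in the original source): with DRF's default
    # PageNumberPagination the response body would be a dict such as
    #   {"count": 2, "next": null, "previous": null, "results": [...]}
    # whereas with pagination_class = None the view returns the bare list
    # that existing mobile clients expect.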
def is_org(self, check_org, course_org):
"""
Check course org matches request org param or no param provided
"""
return check_org is None or (check_org.lower() == course_org.lower())
def hide_course_for_enrollment_fee_experiment(self, user, enrollment, experiment_id=9):
"""
Hide enrolled courses from mobile app as part of REV-73/REV-19
"""
course_key = enrollment.course_overview.id
try:
courses_excluded_from_mobile = ExperimentKeyValue.objects.get(
experiment_id=10,
key="mobile_app_exclusion"
).value
courses_excluded_from_mobile = json.loads(courses_excluded_from_mobile.replace('\r', '').replace('\n', ''))
if enrollment.mode == 'audit' and str(course_key) in courses_excluded_from_mobile.keys():
activationTime = dateparse.parse_datetime(courses_excluded_from_mobile[str(course_key)])
if activationTime and enrollment.created and enrollment.created > activationTime:
return True
except (ExperimentKeyValue.DoesNotExist, AttributeError):
pass
try:
ExperimentData.objects.get(
user=user,
experiment_id=experiment_id,
key='enrolled_{0}'.format(course_key),
)
except ExperimentData.DoesNotExist:
return False
try:
ExperimentData.objects.get(
user=user,
experiment_id=experiment_id,
key='paid_{0}'.format(course_key),
)
except ExperimentData.DoesNotExist:
return True
return False
def get_queryset(self):
enrollments = self.queryset.filter(
user__username=self.kwargs['username'],
is_active=True
).order_by('created').reverse()
org = self.request.query_params.get('org', None)
return [
enrollment for enrollment in enrollments
if enrollment.course_overview and self.is_org(org, enrollment.course_overview.org) and
is_mobile_available_for_user(self.request.user, enrollment.course_overview) and
not self.hide_course_for_enrollment_fee_experiment(self.request.user, enrollment)
]
@api_view(["GET"])
@mobile_view()
def my_user_info(request):
"""
Redirect to the currently-logged-in user's info page
"""
return redirect("user-detail", username=request.user.username)
|
teltek/edx-platform
|
lms/djangoapps/mobile_api/users/views.py
|
Python
|
agpl-3.0
| 13,242
|
[
"VisIt"
] |
c74e5d3a359814958d405b628ef50ab698515aee87f4ab664f62674c1cd47c12
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# care_edit_mopac_results.py
#
# Copyright 2016 Carlos Eduardo Sequeiros Borja <casebor@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import argparse
def write_results(models):
""" Function doc
"""
heat_form = []
total_en = []
elect_en = []
core_core_rep = []
for model in range(1, models+1):
with open(args.trajin[:-4]+'_M'+str(model)+'.arc', 'r') as tempfile:
for line in tempfile:
line1 = line.split()
if line[10:27] == 'HEAT OF FORMATION':
energy = "%s %s %s\n"% (str(model), line1[4], line1[5])
heat_form.append(energy)
if line[10:22] == 'TOTAL ENERGY':
energy = "%s %s %s\n"% (str(model), line1[2], line1[3])
total_en.append(energy)
if line[10:27] == 'ELECTRONIC ENERGY':
energy = "%s %s %s\n"% (str(model), line1[2], line1[3])
elect_en.append(energy)
if line[10:29] == 'CORE-CORE REPULSION':
energy = "%s %s %s\n"% (str(model), line1[2], line1[3])
core_core_rep.append(energy)
with open('heat_of_formation.txt', 'w') as results:
results.writelines(heat_form)
with open('total_energy.txt', 'w') as results:
results.writelines(total_en)
with open('electronic_energy.txt', 'w') as results:
results.writelines(elect_en)
with open('core_core_repulsion.txt', 'w') as results:
results.writelines(core_core_rep)
def main():
""" Main function
"""
models = int(args.models)
write_results(models)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Calculates different energy terms using MOPAC package.')
parser.add_argument('-y', '--trajin', required=True, help='Trajectory file name.')
parser.add_argument('-m', '--models', required=True, help='Number of models in the trajectory.')
args = parser.parse_args()
main()
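# Invocation sketch (file names are hypothetical):
#
#   python care_edit_mopac_results.py -y traj.pdb -m 3
#
# This expects traj_M1.arc .. traj_M3.arc next to the trajectory file. The
# parser matches MOPAC .arc summary lines by a fixed column slice, e.g. a
# line laid out roughly as
#   '          HEAT OF FORMATION       =   -123.45678 KCAL/MOL'
# satisfies line[10:27] == 'HEAT OF FORMATION' (exact spacing is an
# assumption about the MOPAC output format).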
|
casebor/labioscripts
|
python/care_edit_mopac_results.py
|
Python
|
gpl-3.0
| 2,541
|
[
"MOPAC"
] |
4691b8f1ebdbd89ad26f22c2727053136bb16d65031f010a1e1782fa8ec4895f
|
# -----------------------------------------------------------------------------
# Milky Way - Turn based strategy game from Milky Way galaxy
#
# URL: https://github.com/FedericoRessi/milkyway/
# License: GPL3
#
# pylint: disable=invalid-name
# -----------------------------------------------------------------------------
'''
@author: Federico Ressi
'''
import logging
logger = logging.getLogger(__name__)
|
FedericoRessi/milkyway
|
conftest.py
|
Python
|
gpl-3.0
| 421
|
[
"Galaxy"
] |
283070352034d9a3fed4c89dd537b49b0605732fe3c4c1a586dbda0709bb00ff
|
from math import pi
from itertools import izip
import numpy as np
from ase.units import Hartree
from gpaw.xc import XC
from gpaw.xc.sic import SIC
from gpaw.atom.generator import Generator
from gpaw.atom.configurations import parameters
from gpaw.utilities import hartree
class NSCFSIC:
def __init__(self, paw):
self.paw = paw
def calculate(self):
ESIC = 0
xc = self.paw.hamiltonian.xc
assert xc.type == 'LDA'
# Calculate the contribution from the core orbitals
for a in self.paw.density.D_asp:
setup = self.paw.density.setups[a]
# TODO: Use XC which has been used to calculate the actual
# calculation.
# TODO: Loop over setups, not atoms.
print 'Atom core SIC for ', setup.symbol
print '%10s%10s%10s' % ('E_xc[n_i]', 'E_Ha[n_i]', 'E_SIC')
g = Generator(setup.symbol, xcname='LDA', nofiles=True, txt=None)
g.run(**parameters[setup.symbol])
njcore = g.njcore
for f, l, e, u in zip(g.f_j[:njcore], g.l_j[:njcore],
g.e_j[:njcore], g.u_j[:njcore]):
# Calculate orbital density
# NOTE: It's spherically symmetrized!
#n = np.dot(self.f_j,
assert l == 0, ('Not tested for l>0 core states')
na = np.where(abs(u) < 1e-160, 0,u)**2 / (4 * pi)
na[1:] /= g.r[1:]**2
na[0] = na[1]
nb = np.zeros(g.N)
v_sg = np.zeros((2, g.N))
e_g = np.zeros(g.N)
vHr = np.zeros(g.N)
Exc = xc.calculate_spherical(g.rgd, np.array([na, nb]), v_sg)
hartree(0, na * g.r * g.dr, g.beta, g.N, vHr)
EHa = 2*pi*np.dot(vHr*na*g.r , g.dr)
print ('%10.2f%10.2f%10.2f' % (Exc * Hartree, EHa * Hartree,
-f*(EHa+Exc) * Hartree))
ESIC += -f*(EHa+Exc)
sic = SIC(finegrid=True, coulomb_factor=1, xc_factor=1)
sic.initialize(self.paw.density, self.paw.hamiltonian, self.paw.wfs)
sic.set_positions(self.paw.atoms.get_scaled_positions())
print 'Valence electron sic '
print '%10s%10s%10s%10s%10s%10s' % ('spin', 'k-point', 'band',
'E_xc[n_i]', 'E_Ha[n_i]', 'E_SIC')
assert len(self.paw.wfs.kpt_u)==1, ('Not tested for bulk calculations')
for s, spin in sic.spin_s.items():
spin.initialize_orbitals()
spin.update_optimal_states()
spin.update_potentials()
n = 0
for xc, c in zip(spin.exc_m, spin.ecoulomb_m):
print ('%10i%10i%10i%10.2f%10.2f%10.2f' %
(s, 0, n, -xc * Hartree, -c * Hartree,
2 * (xc + c) * Hartree))
n += 1
ESIC += spin.esic
print 'Total correction for self-interaction energy:'
print '%10.2f eV' % (ESIC * Hartree)
print 'New total energy:'
total = (ESIC * Hartree + self.paw.get_potential_energy() +
self.paw.get_reference_energy())
print '%10.2f eV' % total
return total
|
qsnake/gpaw
|
gpaw/utilities/sic.py
|
Python
|
gpl-3.0
| 3,324
|
[
"ASE",
"GPAW"
] |
303581f01f0bc7bb54d7624db00bb75469ba784f247b6c8f72adc899ab76c271
|
from pycparser import parse_file
from minic.c_ast_to_minic import *
from minic.minic_ast import *
from minic.mutils import *
import sys
'''
WSTool extracts the set of written variables from each 'for' or 'while' loop
in a C program.
From the command line in the directory of WSTool.py, execute:
python WSTool.py <name of/path to C program>
After running, the set of written variables will display in the terminal.
'''
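# Example (a sketch, not part of the original tool): for a C fragment such as
#
#   for (i = 0; i < n; i = i + 1) {
#       s = s + a[i];
#   }
#
# WriteSetVisitor collects {'i', 's'}: 'i' from the init and next assignments
# of the for loop, and 's' from the assignment in the loop body.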
class WriteSetVisitor(NodeVisitor):
def __init__(self):
self.writeset = set()
    def visit_If(self, ifnode):
        wif = WriteSetVisitor()
        welse = WriteSetVisitor()
        if ifnode.iftrue is not None:
            wif.visit(ifnode.iftrue)
        if ifnode.iffalse is not None:
            welse.visit(ifnode.iffalse)
        # set.union() returns a new set, so the result must be assigned back
        self.writeset |= wif.writeset | welse.writeset
def visit_For(self, fornode):
if fornode.init is not None and isinstance(fornode.init, Assignment):
self.writeset.add(fornode.init.lvalue.name)
if fornode.next is not None and isinstance(fornode.next, Assignment):
self.writeset.add(fornode.next.lvalue.name)
if fornode.stmt is not None and isinstance(fornode.stmt, Block):
for node in fornode.stmt.block_items:
if node is not None and isinstance(node, Assignment):
self.writeset.add(node.lvalue.name)
def visit_While(self, whilenode):
if whilenode.stmt is not None and isinstance(whilenode.stmt, Block):
for node in whilenode.stmt.block_items:
if node is not None and isinstance(node, Assignment):
self.writeset.add(node.lvalue.name)
def visit_DoWhile(self, downode):
if downode.stmt is not None and isinstance(downode.stmt, Block):
for node in downode.stmt.block_items:
if node is not None and isinstance(node, Assignment):
self.writeset.add(node.lvalue.name)
class PreLoopAssignmentVisitor(NodeVisitor):
def __init__(self):
self.initial = {}
    def visit_FuncDef(self, funcdefnode):
        # NodeVisitor dispatches on the node class name, so this handler must
        # be named visit_FuncDef; the original visit_PreLoopVisitor was never
        # invoked (the stray "HI" debug prints have been removed).
        if funcdefnode is not None and isinstance(funcdefnode, FuncDef):
            body_items = getattr(funcdefnode.body, 'block_items', None) or []
            for asnode in body_items:
                if isinstance(asnode, Assignment):
                    if isinstance(asnode.rvalue, Constant):
                        self.initial[asnode.lvalue.name] = asnode.rvalue.value
                    elif isinstance(asnode.rvalue, BinaryOp) and isinstance(asnode.rvalue.right, Constant):
                        self.initial[asnode.lvalue.name] = (asnode.lvalue.name + " = " + asnode.rvalue.left.name + str(asnode.rvalue.op) + str(asnode.rvalue.right.value))
class ReachingDefinitionsVisitor(NodeVisitor):
def __init__(self):
self.currdefs = {}
self.reachingdefs = {}
self.currloop = 0
def visit_For(self, fornode):
if fornode.stmt is not None and isinstance(fornode.stmt, Block):
for node in fornode.stmt.block_items:
if node is not None and isinstance(node, Assignment):
if isinstance(node.rvalue, BinaryOp):
if isinstance(node.rvalue.right, Constant):
self.reachingdefs[node.lvalue.name] = (node.lvalue.name + " = " + node.rvalue.left.name + str(node.rvalue.op) +str(node.rvalue.right.value))
else:
self.reachingdefs[node.lvalue.name] = (node.lvalue.name + "=" + node.rvalue.left.name + str(node.rvalue.op) + str(node.rvalue.right.name))
if isinstance(node.rvalue, Constant):
self.reachingdefs[node.lvalue.name] = node.rvalue.value
if fornode.init is not None and isinstance(fornode.next, Assignment):
            self.reachingdefs[fornode.init.lvalue.name] = fornode.next.lvalue.name + " = " + fornode.next.rvalue.left.name + str(fornode.next.rvalue.op) + str(fornode.next.rvalue.right.value)
class FuncWriteSetVisitor(NodeVisitor):
def __init__(self):
        self.writesets = {}
def visit_FuncDef(self, funcdef):
wsv = WriteSetVisitor()
wsv.visit(funcdef.body)
self.writesets[funcdef.decl.name] = wsv.writeset
class FuncReachingDefinitionsVisitor(NodeVisitor):
def __init__(self):
self.reachingdefsets = {}
def visit_FuncDef(self, funcdef):
rdv = ReachingDefinitionsVisitor()
rdv.visit(funcdef.body)
self.reachingdefsets[funcdef.decl.name] = rdv.reachingdefs
if __name__ == "__main__":
sys.path.extend(['.', '..'])
#if len(sys.argv) != 2:
#print ("Usage: project1 <input_file>")
#sys.exit(0)
#file = sys.argv[1]
file = "./project1inputs/p1_input1.c"
ast = parse_file(file)
# convert to minic ast
m_ast = transform(ast)
#sast.show()
frd = FuncReachingDefinitionsVisitor()
frd.visit(m_ast)
pla = PreLoopAssignmentVisitor()
pla.visit(m_ast)
for reachingdef, value in pla.initial.items():
print("%s contains %r" % (reachingdef, value))
for fname, reachingdefs in frd.reachingdefsets.items():
print ("%s writes in %r" % (fname, reachingdefs))
|
martylee/Python
|
CSC410-Project-1-master/WSTool.py
|
Python
|
gpl-2.0
| 5,193
|
[
"VisIt"
] |
0fe339afe86763b902d028a84e7e9b387877e1e3c4f55c37529f3420aaa9e446
|
#!/usr/bin/env python
#########################################################################################
#
# Resample data.
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2014 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Julien Cohen-Adad
# Modified: 2014-10-10
#
# About the license: see the file LICENSE.TXT
#########################################################################################
#TODO: pad for c3d!!!!!!
import sys
import os
import getopt
import commands
import sct_utils as sct
import time
from sct_convert import convert
from msct_image import Image
# DEFAULT PARAMETERS
class Param:
## The constructor
def __init__(self):
self.debug = 0
self.fname_data = ''
self.fname_out = ''
self.factor = ''
self.interpolation = 'Linear'
self.file_suffix = 'r' # output suffix
self.verbose = 1
self.remove_tmp_files = 1
# main
#=======================================================================================================================
def main():
# Parameters for debug mode
if param.debug:
print '\n*** WARNING: DEBUG MODE ON ***\n'
# get path of the testing data
status, path_sct_data = commands.getstatusoutput('echo $SCT_TESTING_DATA_DIR')
param.fname_data = path_sct_data+'/fmri/fmri.nii.gz'
param.factor = '2' #'0.5x0.5x1'
param.remove_tmp_files = 0
param.verbose = 1
else:
# Check input parameters
try:
opts, args = getopt.getopt(sys.argv[1:], 'hf:i:o:r:v:x:')
except getopt.GetoptError:
usage()
if not opts:
usage()
for opt, arg in opts:
if opt == '-h':
usage()
elif opt in '-f':
param.factor = arg
elif opt in '-i':
param.fname_data = arg
elif opt in '-o':
param.fname_out = arg
elif opt in '-r':
param.remove_tmp_files = int(arg)
elif opt in '-v':
param.verbose = int(arg)
elif opt in '-x':
param.interpolation = arg
# run main program
resample()
# resample
#=======================================================================================================================
def resample():
fsloutput = 'export FSLOUTPUTTYPE=NIFTI; ' # for faster processing, all outputs are in NIFTI
ext = '.nii'
# display usage if a mandatory argument is not provided
if param.fname_data == '' or param.factor == '':
sct.printv('\nERROR: All mandatory arguments are not provided. See usage (add -h).\n', 1, 'error')
# check existence of input files
sct.printv('\nCheck existence of input files...', param.verbose)
sct.check_file_exist(param.fname_data, param.verbose)
# extract resampling factor
sct.printv('\nParse resampling factor...', param.verbose)
factor_split = param.factor.split('x')
factor = [float(factor_split[i]) for i in range(len(factor_split))]
# check if it has three values
if not len(factor) == 3:
sct.printv('\nERROR: factor should have three dimensions. E.g., 2x2x1.\n', 1, 'error')
else:
fx, fy, fz = [float(factor_split[i]) for i in range(len(factor_split))]
# check interpolation
if param.interpolation not in ['NearestNeighbor','Linear','Cubic','Sinc','Gaussian']:
sct.printv('\nERROR: interpolation should be one of those:NearestNeighbor|Linear|Cubic|Sinc|Gaussian.\n', 1, 'error')
# display input parameters
sct.printv('\nInput parameters:', param.verbose)
sct.printv(' data ..................'+param.fname_data, param.verbose)
sct.printv(' resampling factor .....'+param.factor, param.verbose)
# Extract path/file/extension
path_data, file_data, ext_data = sct.extract_fname(param.fname_data)
path_out, file_out, ext_out = '', file_data, ext_data
# create temporary folder
sct.printv('\nCreate temporary folder...', param.verbose)
path_tmp = sct.slash_at_the_end('tmp.'+time.strftime("%y%m%d%H%M%S"), 1)
sct.run('mkdir '+path_tmp, param.verbose)
# Copying input data to tmp folder and convert to nii
# NB: cannot use c3d here because c3d cannot convert 4D data.
sct.printv('\nCopying input data to tmp folder and convert to nii...', param.verbose)
sct.run('cp '+param.fname_data+' '+path_tmp+'data'+ext_data, param.verbose)
# go to tmp folder
os.chdir(path_tmp)
# convert to nii format
convert('data'+ext_data, 'data.nii')
# Get dimensions of data
sct.printv('\nGet dimensions of data...', param.verbose)
nx, ny, nz, nt, px, py, pz, pt = Image('data.nii').dim
sct.printv(' ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz)+ ' x ' + str(nt), param.verbose)
dim = 4 # by default, will be adjusted later
if nt == 1:
dim = 3
if nz == 1:
dim = 2
        sct.printv('ERROR (sct_resample): Dimension of input data is different from 3 or 4. Exit program', 1, 'error')
# Calculate new dimensions
sct.printv('\nCalculate new dimensions...', param.verbose)
nx_new = int(round(nx*fx))
ny_new = int(round(ny*fy))
nz_new = int(round(nz*fz))
sct.printv(' ' + str(nx_new) + ' x ' + str(ny_new) + ' x ' + str(nz_new)+ ' x ' + str(nt), param.verbose)
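    # Example of the arithmetic above (illustrative values): a 64x64x20
    # volume with factor 0.5x0.5x1 yields nx_new=32, ny_new=32, nz_new=20;
    # each dimension is rounded to the nearest integer voxel count.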
# if dim=4, split data
if dim == 4:
# Split into T dimension
sct.printv('\nSplit along T dimension...', param.verbose)
from sct_split_data import split_data
split_data('data.nii', 3, '_T')
elif dim == 3:
# rename file to have compatible code with 4d
status, output = sct.run('cp data.nii data_T0000.nii', param.verbose)
for it in range(nt):
# identify current volume
file_data_splitT = 'data_T'+str(it).zfill(4)
file_data_splitT_resample = file_data_splitT+'r'
# resample volume
sct.printv(('\nResample volume '+str((it+1))+'/'+str(nt)+':'), param.verbose)
sct.run('isct_c3d '+file_data_splitT+ext+' -interpolation '+param.interpolation+' -resample '+str(nx_new)+'x'+str(ny_new)+'x'+str(nz_new)+'vox -o '+file_data_splitT_resample+ext)
# pad data (for ANTs)
# # TODO: check if need to pad also for the estimate_and_apply
# if program == 'ants' and todo == 'estimate' and slicewise == 0:
# sct.run('isct_c3d '+file_data_splitT_num[it]+' -pad 0x0x3vox 0x0x3vox 0 -o '+file_data_splitT_num[it]+'_pad.nii')
# file_data_splitT_num[it] = file_data_splitT_num[it]+'_pad'
# merge data back along T
file_data_resample = file_data+param.file_suffix
sct.printv('\nMerge data back along T...', param.verbose)
from sct_concat_data import concat_data
import glob
concat_data(glob.glob('data_T*r.nii'), file_data_resample, dim=3)
# come back to parent folder
os.chdir('..')
# Generate output files
sct.printv('\nGenerate output files...', param.verbose)
if not param.fname_out:
param.fname_out = path_out+file_out+param.file_suffix+ext_out
sct.generate_output_file(path_tmp+file_data_resample+ext, param.fname_out)
# Remove temporary files
if param.remove_tmp_files == 1:
print('\nRemove temporary files...')
sct.run('rm -rf '+path_tmp, param.verbose)
# to view results
sct.printv('\nDone! To view results, type:', param.verbose)
sct.printv('fslview '+param.fname_out+' &', param.verbose, 'info')
print
# Print usage
# ==========================================================================================
def usage():
print """
"""+os.path.basename(__file__)+"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Part of the Spinal Cord Toolbox <https://sourceforge.net/projects/spinalcordtoolbox>
DESCRIPTION
Anisotropic resampling of 3D or 4D data.
USAGE
"""+os.path.basename(__file__)+""" -i <data> -f <factor>
MANDATORY ARGUMENTS
-i <data> image to segment. Can be 2D, 3D or 4D.
-f <fxxfyxfz> resampling factor in each of the first 3 dimensions (x,y,z). Separate with "x"
For 2x upsampling, set to 2. For 2x downsampling set to 0.5
OPTIONAL ARGUMENTS
-o <file> output file name.
-r {0,1} remove temporary files. Default="""+str(param_debug.remove_tmp_files)+"""
-v {0,1} verbose. Default="""+str(param_debug.verbose)+"""
-h help. Show this message
EXAMPLE
"""+os.path.basename(__file__)+""" -i dwi.nii.gz -f 0.5x0.5x1\n"""
# exit program
sys.exit(2)
#=======================================================================================================================
# Start program
#=======================================================================================================================
if __name__ == "__main__":
# initialize parameters
param = Param()
param_debug = Param()
# call main function
main()
|
3324fr/spinalcordtoolbox
|
dev/sct_resample/sct_resample_old.py
|
Python
|
mit
| 9,124
|
[
"Gaussian"
] |
0f09e4ea0a220395322d0b47620b58c64efb8f69c59315f0cccc5c8b097e7a22
|
#
# Copyright 2001 - 2006 Ludek Smid [http://www.ospace.net/]
#
# This file is part of IGE - Outer Space.
#
# IGE - Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# IGE - Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IGE - Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from ige.IObject import IObject
from ige import *
from Const import *
from xml.dom.minidom import Node, parse
from ige.IObject import IObject
from ISystem import ISystem
from ige.IDataHolder import IDataHolder
import os.path, time, Utils, Rules
from ige import log
import Scanner
class IGalaxy(IObject):
typeID = T_GALAXY
forums = {"PUBLIC": 112, "NEWS": 112}
def init(self, obj):
IObject.init(self, obj)
#
obj.x = 0.0
obj.y = 0.0
obj.radius = 0.0
obj.centerWeight = 250.0
obj.systems = []
obj.startingPos = []
obj.numOfStartPos = 0
obj.timeEnabled = 0 # TODO change to 0
obj.timeStopped = 0
obj.creationTime = 0.0
obj.imperator = OID_NONE
obj.description = ""
# electromagnetic radiation
obj.emrLevel = 1.0
obj.emrTrend = 1.0
obj.emrTime = 0
def update(self, tran, obj):
# check existence of all systems
if 0:
for systemID in obj.systems:
if not tran.db.has_key(systemID):
log.debug("CONSISTENCY - system %d from galaxy %d does not exists" % (systemID, obj.oid))
elif tran.db[systemID].type not in (T_SYSTEM, T_WORMHOLE):
log.debug("CONSISTENCY - system %d from galaxy %d is not a T_SYSTEM or T_WORMHOLE" % (systemID, obj.oid))
# validate starting positions
for planetID in obj.startingPos[:]:
if not tran.db.has_key(planetID):
log.debug("REMOVING nonexistent obj from start pos", planetID)
obj.startingPos.remove(planetID)
planet = tran.db[planetID]
if planet.type != T_PLANET:
log.debug("REMOVING ??? from start pos", planetID)
obj.startingPos.remove(planetID)
#if planet.plType != "E":
# log.debug("REMOVING non earth planet from start pos", planetID)
# obj.startingPos.remove(planetID)
# check compOf
if not tran.db.has_key(obj.compOf) or tran.db[obj.compOf].type != T_UNIVERSE:
log.debug("CONSISTENCY invalid compOf for galaxy", obj.oid, obj.compOf)
update.public = 0
def getReferences(self, tran, obj):
return obj.systems
getReferences.public = 0
def processINITPhase(self, tran, obj, data):
# compute emr level
turn = tran.db[OID_UNIVERSE].turn
obj.emrTime -= 1
if obj.emrTime <= 0:
modulo = turn % Rules.emrPeriod
for season in Rules.emrSeasons:
if modulo >= season.startTime and modulo <= season.endTime:
log.debug("EMR - season", season.name)
obj.emrTrend = Utils.rand(int(season.emrLevelMin * 100), int(season.emrLevelMax * 100) + 1) / 100.0
obj.emrTime = Utils.rand(Rules.emrMinDuration, Rules.emrMaxDuration)
log.debug("EMR - trend, time", obj.emrTrend, obj.emrTime)
message = {
"sender": "GNC",
"senderID": obj.oid,
"forum": "NEWS",
"data": (obj.oid, MSG_GNC_EMR_FORECAST, obj.oid, turn, (obj.emrTrend, obj.emrTime)),
"topic": "EVENT",
}
self.cmd(obj).sendMsg(tran, obj, message)
break
elif obj.emrLevel >= obj.emrTrend:
obj.emrLevel -= Utils.rand(1, 6) / 100.0
elif obj.emrLevel <= obj.emrTrend:
obj.emrLevel += Utils.rand(1, 6) / 100.0
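# Illustrative sketch (not part of the game logic): outside a season change the
# EMR level performs a small random walk toward emrTrend, stepping by
# 0.01-0.05 per turn, e.g.:
#   level, trend = 1.0, 1.3
#   while abs(level - trend) > 0.05:
#       level += 0.03 if level < trend else -0.03  # Utils.rand(1, 6) / 100.0 above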
#
if not obj.timeStopped:
if not obj.timeEnabled:
self.cmd(obj).enableTime(tran, obj)
else:
self.cmd(obj).enableTime(tran, obj, force = 1, enable = 0)
# remove old messages
self.cmd(obj).deleteOldMsgs(tran, obj)
return obj.systems
processINITPhase.public = 1
processINITPhase.accLevel = AL_ADMIN
def processPRODPhase(self, tran, obj, data):
if obj.timeEnabled and not obj.timeStopped:
return obj.systems
processPRODPhase.public = 1
processPRODPhase.accLevel = AL_ADMIN
def processACTIONPhase(self, tran, obj, data):
if obj.timeEnabled and not obj.timeStopped:
return obj.systems
processACTIONPhase.public = 1
processACTIONPhase.accLevel = AL_ADMIN
def processSCAN2Phase(self, tran, obj, data):
# compute scanner for all objects on the map
playerMap = Scanner.computeMap(self, tran, obj)
# distribute map
for playerID, map in playerMap.iteritems():
player = tran.db[playerID]
self.cmd(player).mergeScannerMap(tran, player, map)
return
processSCAN2Phase.public = 1
processSCAN2Phase.accLevel = AL_ADMIN
def processBATTLEPhase(self, tran, obj, data):
if obj.timeEnabled and not obj.timeStopped:
return obj.systems
processBATTLEPhase.public = 1
processBATTLEPhase.accLevel = AL_ADMIN
def processFINALPhase(self, tran, obj, data):
# validate starting positions
remove = []
for planetID in obj.startingPos:
planet = tran.db[planetID]
if planet.owner != OID_NONE:
remove.append(planetID)
for planetID in remove:
obj.startingPos.remove(planetID)
#
#if obj.timeEnabled and not obj.timeStopped:
return obj.systems
processFINALPhase.public = 1
processFINALPhase.accLevel = AL_ADMIN
def processFINAL2Phase(self, tran, obj, data):
# save history file
turn = tran.db[OID_UNIVERSE].turn
# TODO: reneable history when it's optimized
if turn % 6 == 0 and False:
log.debug("Saving history for galaxy", obj.oid, obj.name)
fh = open("var/history/galaxy%d-%06d.xml" % (obj.oid, turn), "w+")
print >>fh, '<?xml version="1.0" encoding="UTF-8"?>'
print >>fh, '<history turn="%d" galaxy="%d" name="%s">' % (turn, obj.oid, obj.name)
# save systems and owners
players = {}
print >>fh, ' <systems>'
for systemID in obj.systems:
system = tran.db[systemID]
owners = {}
for planetID in system.planets:
ownerID = tran.db[planetID].owner
if ownerID != OID_NONE:
owners[ownerID] = tran.db[ownerID].name
players[ownerID] = None
print >>fh, ' <sys x="%.2f" y="%.2f" name="%s" owners="%s"/>' % (
system.x,
system.y,
system.name,
",".join(owners.values())
)
print >>fh, ' </systems>'
# stats
print >>fh, ' <stats>'
for playerID in players:
player = tran.db[playerID]
print >>fh, ' <pl name="%s" pop="%d" planets="%d" structs="%d" cp="%d" mp="%d" rp="%d"/>' % (
player.name,
player.stats.storPop,
player.stats.planets,
player.stats.structs,
player.stats.prodProd,
player.stats.fleetPwr,
player.stats.prodSci,
)
print >>fh, ' </stats>'
print >>fh, '</history>'
fh.close()
processFINAL2Phase.public = 1
processFINAL2Phase.accLevel = AL_ADMIN
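# Sketch of the history file emitted above (one <sys> per system, one <pl> per
# owning player; all values here are placeholders):
#   <history turn="42" galaxy="3" name="Example">
#     <systems>
#       <sys x="12.50" y="7.25" name="Sol" owners="Alice,Bob"/>
#     </systems>
#     <stats>
#       <pl name="Alice" pop="12000" planets="3" structs="14" cp="120" mp="30" rp="25"/>
#     </stats>
#   </history>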
def loadFromXML(self, tran, obj, file, galID, x, y, name):
log.message('IGalaxy', 'Parsing XML file...')
dom = parse(os.path.join('data', file))
log.message('IGalaxy', 'XML file parsed.')
assert dom.documentElement.tagName == 'universe'
for node in dom.documentElement.childNodes:
if node.nodeType == Node.ELEMENT_NODE and node.tagName == 'galaxy':
if node.getAttribute('id') == galID:
self.loadDOMNode(tran, obj, node, x, y, name)
self.connectWormHoles(tran, obj)
return SUCC
raise GameException('No such id %s in resource' % galID)
loadFromXML.public = 1
loadFromXML.accLevel = AL_ADMIN
def loadDOMNode(self, tran, obj, node, x, y, name):
obj.name = name
obj.x = float(x)
obj.y = float(y)
xoff = x - float(node.getAttribute('x'))
yoff = y - float(node.getAttribute('y'))
obj.creationTime = time.time()
for elem in node.childNodes:
if elem.nodeType == Node.ELEMENT_NODE:
name = elem.tagName
if name == 'properties':
self.loadDOMAttrs(obj, elem)
elif name == 'system':
system = tran.db[self.createSystem(tran, obj)]
self.cmd(system).loadDOMNode(tran, system, xoff, yoff, elem)
elif name == 'hole':
wormHole = tran.db[self.createWormHole(tran, obj)]
self.cmd(wormHole).loadDOMNode(tran, wormHole, xoff, yoff, elem)
else:
raise GameException('Unknown element %s' % name)
return SUCC
def connectWormHoles(self, tran, obj):
wormHoles = {}
for holeID in obj.systems:
wormHole = tran.db[holeID]
if wormHole.type == T_WORMHOLE:
wormHoles[wormHole.name] = holeID
for holeID in obj.systems:
wormHole = tran.db[holeID]
if wormHole.type != T_WORMHOLE:
continue
if len(wormHole.destination) == 0:
raise GameException('Wrong WormHole(%d) definition' % holeID)
if wormHole.destination == wormHole.name:
raise GameException('Same destination as position for WormHole(%d)' % holeID)
destinationOid = wormHoles.get(wormHole.destination, OID_NONE)
if destinationOid == OID_NONE:
raise GameException("WormHole(%d) has wrong destination '%s'" % (holeID, wormHole.destination))
wormHole.destinationOid = destinationOid
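# Example of the pairing resolved above (names are placeholders): two wormholes
# reference each other by name, so WH-A.destination == "WH-B" and
# WH-B.destination == "WH-A"; each destination name is mapped to its OID via
# the wormHoles lookup built in the first loop.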
def createSystem(self, tran, obj):
system = self.new(T_SYSTEM)
system.compOf = obj.oid
oid = tran.db.create(system)
obj.systems.append(oid)
return oid
def createWormHole(self, tran, galaxy):
hole = self.new(T_WORMHOLE)
hole.compOf = galaxy.oid
oid = tran.db.create(hole)
galaxy.systems.append(oid)
return oid
def enableTime(self, tran, obj, force = 0, deleteSP = 0, enable = 1):
log.debug('IGalaxy', 'Checking for time...')
if not force:
if obj.timeEnabled:
return
canRun = 0
# there must be at least 1/2 positions already assigned
#if len(obj.startingPos) <= obj.numOfStartPos / 2 and obj.creationTime < time.time() - 2 * 24 * 3600:
# log.debug("Half galaxy populated", len(obj.startingPos), obj.numOfStartPos)
# canRun = 1
# all starting positions are taken
if not obj.startingPos:
log.debug("All positions taken, starting galaxy")
canRun = 1
# or at least two days have passed since creation
if obj.creationTime < time.time() - 2 * 24 * 3600:
log.debug("Two days passed", obj.creationTime, time.time() - 2 * 24 * 3600)
canRun = 1
if not canRun:
return 0
# ok, enable time
log.message('IGalaxy', 'Enabling time for', obj.oid)
obj.timeEnabled = enable
# close galaxy
if deleteSP:
obj.startingPos = []
# load new galaxy
# TODO
# enable time for players
for systemID in obj.systems:
system = tran.db[systemID]
for planetID in system.planets:
planet = tran.db[planetID]
if planet.owner != OID_NONE:
player = tran.db[planet.owner]
if player.timeEnabled != enable:
player.timeEnabled = enable
player.lastLogin = time.time()
if enable:
Utils.sendMessage(tran, player, MSG_ENABLED_TIME, player.oid, None)
enableTime.public = 1
enableTime.accLevel = AL_ADMIN
def delete(self, tran, obj):
log.debug(obj.oid, "GALAXY - delete")
universe = tran.db[OID_UNIVERSE]
# delete systems and planets
for systemID in obj.systems:
log.debug("Deleting system", systemID)
system = tran.db[systemID]
log.debug("-- planets", system.planets)
log.debug("-- fleets", system.fleets, system.closeFleets)
for planetID in system.planets[:]:
planet = tran.db[planetID]
self.cmd(planet).changeOwner(tran, planet, OID_NONE, force = 1)
del tran.db[planetID]
for fleetID in system.closeFleets[:]:
fleet = tran.db[fleetID]
# this will modify system fleet and closeFleets attrs
self.cmd(fleet).disbandFleet(tran, fleet)
del tran.db[systemID]
# delete all remaining fleets
for playerID in universe.players[:]:
player = tran.db[playerID]
if obj.oid not in player.galaxies:
continue
if player.fleets:
log.debug("Player %d has still fleets" % playerID, player.name, player.fleets)
for fleetID in player.fleets:
fleet = tran.db[fleetID]
log.debug("Fleet NOT DELETED:", fleet)
if player.planets:
log.debug("Player %d has still planets" % playerID, player.name, player.planets)
self.cmd(player).delete(tran, player)
# remove this galaxy from the list of the galaxies
tran.db[OID_UNIVERSE].galaxies.remove(obj.oid)
del tran.db[obj.oid]
return 1
delete.public = 1
delete.accLevel = AL_ADMIN
def getPublicInfo(self, tran, obj):
result = IDataHolder()
result.oid = obj.oid
result.type = obj.type
result.name = obj.name
result.emrLevel = obj.emrLevel
return result
getPublicInfo.public = 1
getPublicInfo.accLevel = AL_NONE
def getDescription(self, obj):
return obj.description
getDescription.public = 1
getDescription.accLevel = AL_NONE
def setupEnvironment(self, tran, obj):
# check required players
universe = tran.db[OID_UNIVERSE]
players = {}
for playerType in (T_AIRENPLAYER, T_AIMUTPLAYER, T_AIPIRPLAYER, T_AIEDENPLAYER):
found = 0
for playerID in universe.players:
player = tran.db[playerID]
if obj.oid in player.galaxies and player.type == playerType:
players[playerType] = player
found = 1
break
if found:
continue
# create new player
log.debug("Creating new player", playerType)
player = self.new(playerType)
self.cmd(player).register(tran, player)
player.galaxies.append(obj.oid)
players[playerType] = player
# great we have all players - scan planets
for systemID in obj.systems:
system = tran.db[systemID]
for planetID in system.planets:
planet = tran.db[planetID]
# renegades
if planet.plStratRes in (SR_TL1A, SR_TL1B) and planet.owner == OID_NONE:
# populate planet
log.debug("Adding renegades", planetID)
self.cmd(planet).changeOwner(tran, planet, players[T_AIRENPLAYER].oid, 1)
planet.slots.append(Utils.newStructure(tran, Rules.Tech.RENEGADEBASE, planet.owner))
planet.storPop = 3000
# pirates
if planet.plStratRes in (SR_TL3A, SR_TL3B, SR_TL3C) and planet.owner == OID_NONE:
# populate planet
log.debug("Adding pirates", planetID)
self.cmd(planet).changeOwner(tran, planet, players[T_AIPIRPLAYER].oid, 1)
planet.slots.append(Utils.newStructure(tran, Rules.Tech.PIRATEBASE, planet.owner))
planet.storPop = 5000
if planet.plSlots > 1:
planet.slots.append(Utils.newStructure(tran, Rules.Tech.PIRATEDEN, planet.owner))
planet.storPop += 1000
# EDEN
if planet.plStratRes in (SR_TL5A, SR_TL5B, SR_TL5C) and planet.owner == OID_NONE:
# populate planet
log.debug("Adding EDEN", planetID)
self.cmd(planet).changeOwner(tran, planet, players[T_AIEDENPLAYER].oid, 1)
if planet.plSlots < 2:
planet.plSlots = 2
if planet.plMaxSlots < 2:
planet.plMaxSlots = 2
if planet.plDiameter < 2000:
planet.plDiameter = 2000
planet.slots.append(Utils.newStructure(tran, Rules.Tech.EDENBASE, planet.owner))
planet.slots.append(Utils.newStructure(tran, Rules.Tech.EDENSTATION, planet.owner))
planet.storPop = 3000
# mutants
if planet.plDisease != 0 and planet.owner == OID_NONE:
# populate planet
log.debug("Adding mutants", planetID)
self.cmd(planet).changeOwner(tran, planet, players[T_AIMUTPLAYER].oid, 1)
planet.slots.append(Utils.newStructure(tran, Rules.Tech.MUTANTBASE, planet.owner))
planet.storPop = 3000
setupEnvironment.public = 1
setupEnvironment.accLevel = AL_ADMIN
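# Summary of the seeding rules above: unowned planets with TL1 strategic
# resources get a renegade base, TL3 a pirate base (plus a den when the planet
# has more than one slot), TL5 an EDEN base and station, and diseased unowned
# planets are handed to the mutant player.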
## messaging
def canGetMsgs(self, tran, obj, oid):
return 1
canGetMsgs.public = 0
def canSendMsg(self, tran, obj, oid, forum):
if forum == "PUBLIC":
return 1
elif forum == "NEWS":
return 1
return 0
canSendMsg.public = 0
| mozts2005/OuterSpace | client-pygame/libsrvr/ige/ospace/IGalaxy.py | Python | gpl-2.0 | 16,124 | ["Galaxy"] | dd549826166779c80604033e33544bfa1b4bf3e2622d6b195718cb168713f187 |
# -*- coding: utf-8 -*-
"""
Tests for the text processor.
"""
from __future__ import unicode_literals
import json
from unittest import TestCase, main
from datetime import datetime
import mock
from nose.tools import * # noqa (PEP8 asserts)
from nose.plugins.attrib import attr
import nltk
from textblob.compat import PY2, unicode, basestring, binary_type
import textblob as tb
from textblob.np_extractors import ConllExtractor, FastNPExtractor
from textblob.taggers import NLTKTagger, PatternTagger
from textblob.tokenizers import WordTokenizer, SentenceTokenizer
from textblob.sentiments import NaiveBayesAnalyzer, PatternAnalyzer
from textblob.parsers import PatternParser
from textblob.classifiers import NaiveBayesClassifier
import textblob.wordnet as wn
Synset = nltk.corpus.reader.Synset
train = [
('I love this sandwich.', 'pos'),
('This is an amazing place!', 'pos'),
("What a truly amazing dinner.", 'pos'),
('I feel very good about these beers.', 'pos'),
('This is my best work.', 'pos'),
("What an awesome view", 'pos'),
('I do not like this restaurant', 'neg'),
('I am tired of this stuff.', 'neg'),
("I can't deal with this", 'neg'),
('He is my sworn enemy!', 'neg'),
('My boss is horrible.', 'neg')
]
test = [
('The beer was good.', 'pos'),
('I do not enjoy my job', 'neg'),
("I ain't feeling dandy today.", 'neg'),
("I feel amazing!", 'pos'),
('Gary is a friend of mine.', 'pos'),
("I can't believe I'm doing this.", 'neg')
]
classifier = NaiveBayesClassifier(train)
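# The classifier above is trained once at module load and shared by the tests
# below; e.g. classifier.classify("What an amazing day!") should return 'pos'
# for a clearly positive input (illustrative, not asserted here).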
class WordListTest(TestCase):
def setUp(self):
self.words = 'Beautiful is better than ugly'.split()
self.mixed = ['dog', 'dogs', 'blob', 'Blobs', 'text']
def test_len(self):
wl = tb.WordList(['Beautiful', 'is', 'better'])
assert_equal(len(wl), 3)
def test_slicing(self):
wl = tb.WordList(self.words)
first = wl[0]
assert_true(isinstance(first, tb.Word))
assert_equal(first, 'Beautiful')
dogs = wl[0:2]
assert_true(isinstance(dogs, tb.WordList))
assert_equal(dogs, tb.WordList(['Beautiful', 'is']))
def test_repr(self):
wl = tb.WordList(['Beautiful', 'is', 'better'])
if PY2:
assert_equal(repr(wl), "WordList([u'Beautiful', u'is', u'better'])")
else:
assert_equal(repr(wl), "WordList(['Beautiful', 'is', 'better'])")
def test_slice_repr(self):
wl = tb.WordList(['Beautiful', 'is', 'better'])
if PY2:
assert_equal(repr(wl[:2]), "WordList([u'Beautiful', u'is'])")
else:
assert_equal(repr(wl[:2]), "WordList(['Beautiful', 'is'])")
def test_str(self):
wl = tb.WordList(self.words)
assert_equal(str(wl), str(self.words))
def test_singularize(self):
wl = tb.WordList(['dogs', 'cats', 'buffaloes', 'men', 'mice', 'offspring'])
assert_equal(wl.singularize(),
tb.WordList(['dog', 'cat', 'buffalo', 'man', 'mouse', 'offspring']))
def test_pluralize(self):
wl = tb.WordList(['dog', 'cat', 'buffalo', 'antelope'])
assert_equal(wl.pluralize(), tb.WordList(['dogs', 'cats', 'buffaloes', 'antelope']))
@attr('slow')
def test_lemmatize(self):
wl = tb.WordList(["cat", "dogs", "oxen"])
assert_equal(wl.lemmatize(), tb.WordList(['cat', 'dog', 'ox']))
def test_stem(self):  # only the PorterStemmer is tested
wl = tb.WordList(["cat", "dogs", "oxen"])
assert_equal(wl.stem(), tb.WordList(['cat', 'dog', 'oxen']))
def test_upper(self):
wl = tb.WordList(self.words)
assert_equal(wl.upper(), tb.WordList([w.upper() for w in self.words]))
def test_lower(self):
wl = tb.WordList(['Zen', 'oF', 'PYTHON'])
assert_equal(wl.lower(), tb.WordList(['zen', 'of', 'python']))
def test_count(self):
wl = tb.WordList(['monty', 'python', 'Python', 'Monty'])
assert_equal(wl.count('monty'), 2)
assert_equal(wl.count('monty', case_sensitive=True), 1)
assert_equal(wl.count('mon'), 0)
def test_convert_to_list(self):
wl = tb.WordList(self.words)
assert_equal(list(wl), self.words)
def test_append(self):
wl = tb.WordList(['dog'])
wl.append("cat")
assert_true(isinstance(wl[1], tb.Word))
wl.append(('a', 'tuple'))
assert_true(isinstance(wl[2], tuple))
def test_extend(self):
wl = tb.WordList(["cats", "dogs"])
wl.extend(["buffalo", 4])
assert_true(isinstance(wl[2], tb.Word))
assert_true(isinstance(wl[3], int))
def test_pop(self):
wl = tb.WordList(['cats', 'dogs'])
assert_equal(wl.pop(), tb.Word('dogs'))
assert_raises(IndexError, wl.__getitem__, 1)
assert_equal(wl.pop(), tb.Word('cats'))
assert_equal(len(wl), 0)
assert_raises(IndexError, wl.pop)
def test_setitem(self):
wl = tb.WordList(['I', 'love', 'JavaScript'])
wl[2] = tb.Word('Python')
assert_equal(wl[2], tb.Word('Python'))
def test_reverse(self):
wl = tb.WordList(['head', 'shoulders', 'knees', 'toes'])
wl.reverse()
assert_equal(list(wl), ['toes', 'knees', 'shoulders', 'head'])
class SentenceTest(TestCase):
def setUp(self):
self.raw_sentence = \
'Any place with frites and Belgian beer has my vote.'
self.sentence = tb.Sentence(self.raw_sentence)
def test_repr(self):
# In Py2, repr returns bytestring
if PY2:
assert_equal(repr(self.sentence),
b"Sentence(\"{0}\")".format(binary_type(self.raw_sentence)))
# In Py3, returns text type string
else:
assert_equal(repr(self.sentence), 'Sentence("{0}")'.format(self.raw_sentence))
def test_stripped_sentence(self):
assert_equal(self.sentence.stripped,
'any place with frites and belgian beer has my vote')
def test_len(self):
assert_equal(len(self.sentence), len(self.raw_sentence))
@attr('slow')
def test_dict(self):
sentence_dict = self.sentence.dict
assert_equal(sentence_dict, {
'raw': self.raw_sentence,
'start_index': 0,
'polarity': 0.0,
'subjectivity': 0.0,
'end_index': len(self.raw_sentence) - 1,
'stripped': 'any place with frites and belgian beer has my vote',
'noun_phrases': self.sentence.noun_phrases,
})
def test_pos_tags(self):
then1 = datetime.now()
tagged = self.sentence.pos_tags
now1 = datetime.now()
t1 = now1 - then1
then2 = datetime.now()
tagged = self.sentence.pos_tags
now2 = datetime.now()
t2 = now2 - then2
# Getting the pos tags the second time should be faster
# because they were stored as an attribute the first time
assert_true(t2 < t1)
assert_equal(tagged,
[('Any', 'DT'), ('place', 'NN'), ('with', 'IN'),
('frites', 'NNS'), ('and', 'CC'), ('Belgian', 'JJ'),
('beer', 'NN'), ('has', 'VBZ'), ('my', 'PRP$'),
('vote', 'NN')]
)
@attr('slow')
def test_noun_phrases(self):
nps = self.sentence.noun_phrases
assert_equal(nps, ['belgian beer'])
def test_words_are_word_objects(self):
words = self.sentence.words
assert_true(isinstance(words[0], tb.Word))
assert_equal(words[1].pluralize(), 'places')
def test_string_equality(self):
assert_equal(self.sentence, 'Any place with frites and Belgian beer has my vote.')
@mock.patch('textblob.translate.Translator.translate')
def test_translate(self, mock_translate):
mock_translate.return_value = 'Esta es una frase.'
blob = tb.Sentence("This is a sentence.")
translated = blob.translate(to="es")
assert_true(isinstance(translated, tb.Sentence))
assert_equal(translated, "Esta es una frase.")
def test_correct(self):
blob = tb.Sentence("I havv bad speling.")
assert_true(isinstance(blob.correct(), tb.Sentence))
assert_equal(blob.correct(), tb.Sentence("I have bad spelling."))
blob = tb.Sentence("I havv \ngood speling.")
assert_true(isinstance(blob.correct(), tb.Sentence))
assert_equal(blob.correct(), tb.Sentence("I have \ngood spelling."))
@mock.patch('textblob.translate.Translator.translate')
def test_translate_detects_language_by_default(self, mock_translate):
text = unicode("ذات سيادة كاملة")
mock_translate.return_value = "With full sovereignty"
blob = tb.TextBlob(text)
blob.translate()
assert_true(mock_translate.called_once_with(text, from_lang='auto'))
class TextBlobTest(TestCase):
def setUp(self):
self.text = \
"""Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!"""
self.blob = tb.TextBlob(self.text)
self.np_test_text = '''
Python is a widely used general-purpose, high-level programming language.
Its design philosophy emphasizes code readability, and its syntax allows
programmers to express concepts in fewer
lines of code than would be possible in languages such as C.
The language provides constructs intended to enable clear programs on both a small and large scale.
Python supports multiple programming paradigms, including object-oriented,
imperative and functional programming or procedural styles.
It features a dynamic type system and automatic memory management and
has a large and comprehensive standard library. Like other dynamic languages, Python is often used as a scripting language,
but is also used in a wide range of non-scripting contexts.
Using third-party tools, Python code can be packaged into standalone executable
programs. Python interpreters are available for many operating systems. CPython, the reference implementation of Python, is free and open source software and
has a community-based development model, as do nearly all of its alternative implementations. CPython
is managed by the non-profit Python Software Foundation.'''
self.np_test_blob = tb.TextBlob(self.np_test_text)
self.short = "Beautiful is better than ugly. "
self.short_blob = tb.TextBlob(self.short)
def test_init(self):
blob = tb.TextBlob('Wow I love this place. It really rocks my socks!')
assert_equal(len(blob.sentences), 2)
assert_equal(blob.sentences[1].stripped, 'it really rocks my socks')
assert_equal(blob.string, blob.raw)
# Must initialize with a string
assert_raises(TypeError, tb.TextBlob.__init__, ['invalid'])
def test_string_equality(self):
blob = tb.TextBlob("Textblobs should be equal to strings.")
assert_equal(blob, "Textblobs should be equal to strings.")
def test_string_comparison(self):
blob = tb.TextBlob("apple")
assert_true(blob < "banana")
assert_true(blob > 'aardvark')
def test_hash(self):
blob = tb.TextBlob('apple')
assert_equal(hash(blob), hash('apple'))
assert_not_equal(hash(blob), hash('banana'))
def test_stripped(self):
blob = tb.TextBlob("Um... well this ain't right.!..")
assert_equal(blob.stripped, "um well this aint right")
def test_ngrams(self):
blob = tb.TextBlob("I am eating a pizza.")
three_grams = blob.ngrams()
assert_equal(three_grams, [
tb.WordList(('I', 'am', 'eating')),
tb.WordList(('am', 'eating', 'a')),
tb.WordList(('eating', 'a', 'pizza'))
])
four_grams = blob.ngrams(n=4)
assert_equal(four_grams, [
tb.WordList(('I', 'am', 'eating', 'a')),
tb.WordList(('am', 'eating', 'a', 'pizza'))
])
def test_clean_html(self):
html = '<b>Python</b> is a widely used <a href="/wiki/General-purpose_programming_language" title="General-purpose programming language">general-purpose</a>, <a href="/wiki/High-level_programming_language" title="High-level programming language">high-level programming language</a>.'
assert_raises(NotImplementedError, lambda: tb.TextBlob(html, clean_html=True))
def test_sentences(self):
blob = self.blob
assert_equal(len(blob.sentences), 19)
assert_true(isinstance(blob.sentences[0], tb.Sentence))
def test_sentences_with_space_before_punctuation(self):
text = "Uh oh. This sentence might cause some problems. : Now we're ok."
b = tb.TextBlob(text)
assert_equal(len(b.sentences), 3)
def test_sentiment_of_foreign_text(self):
blob = tb.TextBlob(u'Nous avons cherch\xe9 un motel dans la r\xe9gion de '
'Madison, mais les motels ne sont pas nombreux et nous avons '
'finalement choisi un Motel 6, attir\xe9s par le bas '
'prix de la chambre.')
assert_true(isinstance(blob.sentiment[0], float))
def test_iter(self):
for i, letter in enumerate(self.short_blob):
assert_equal(letter, self.short[i])
def test_raw_sentences(self):
blob = tb.TextBlob(self.text)
assert_equal(len(blob.raw_sentences), 19)
assert_equal(blob.raw_sentences[0], "Beautiful is better than ugly.")
def test_blob_with_no_sentences(self):
text = "this isn't really a sentence it's just a long string of words"
blob = tb.TextBlob(text)
# the blob just has one sentence
assert_equal(len(blob.sentences), 1)
# the start index is 0, the end index is len(text) - 1
assert_equal(blob.sentences[0].start_index, 0)
assert_equal(blob.sentences[0].end_index, len(text))
def test_len(self):
blob = tb.TextBlob('lorem ipsum')
assert_equal(len(blob), len('lorem ipsum'))
def test_repr(self):
blob1 = tb.TextBlob('lorem ipsum')
if PY2:
assert_equal(repr(blob1), b"TextBlob(\"{0}\")".format(binary_type('lorem ipsum')))
else:
assert_equal(repr(blob1), "TextBlob(\"{0}\")".format('lorem ipsum'))
def test_cmp(self):
blob1 = tb.TextBlob('lorem ipsum')
blob2 = tb.TextBlob('lorem ipsum')
blob3 = tb.TextBlob('dolor sit amet')
assert_true(blob1 == blob2) # test ==
assert_true(blob1 > blob3) # test >
assert_true(blob1 >= blob3) # test >=
assert_true(blob3 < blob2) # test <
assert_true(blob3 <= blob2) # test <=
def test_invalid_comparison(self):
blob = tb.TextBlob("one")
if PY2:
# invalid comparison returns False
assert_false(blob < 2)
else:
# invalid comparison raises Error
with assert_raises(TypeError):
blob < 2
def test_words(self):
blob = tb.TextBlob('Beautiful is better than ugly. '
'Explicit is better than implicit.')
assert_true(isinstance(blob.words, tb.WordList))
assert_equal(blob.words, tb.WordList([
'Beautiful',
'is',
'better',
'than',
'ugly',
'Explicit',
'is',
'better',
'than',
'implicit',
]))
short = tb.TextBlob("Just a bundle of words")
assert_equal(short.words, tb.WordList([
'Just', 'a', 'bundle', 'of', 'words'
]))
def test_words_includes_apostrophes_in_contractions(self):
blob = tb.TextBlob("Let's test this.")
assert_equal(blob.words, tb.WordList(['Let', "'s", "test", "this"]))
blob2 = tb.TextBlob("I can't believe it's not butter.")
assert_equal(blob2.words, tb.WordList(['I', 'ca', "n't", "believe",
'it', "'s", "not", "butter"]))
def test_pos_tags(self):
blob = tb.TextBlob('Simple is better than complex. '
'Complex is better than complicated.')
assert_equal(blob.pos_tags, [
('Simple', 'NN'),
('is', 'VBZ'),
('better', 'JJR'),
('than', 'IN'),
('complex', 'JJ'),
('Complex', 'NNP'),
('is', 'VBZ'),
('better', 'JJR'),
('than', 'IN'),
('complicated', 'VBN'),
])
def test_tags(self):
assert_equal(self.blob.tags, self.blob.pos_tags)
def test_tagging_nonascii(self):
b = tb.TextBlob('Learn how to make the five classic French mother sauces: '
'Béchamel, Tomato Sauce, Espagnole, Velouté and Hollandaise.')
tags = b.tags
assert_true(isinstance(tags[0][0], unicode))
def test_pos_tags_includes_one_letter_articles(self):
blob = tb.TextBlob("This is a sentence.")
assert_equal(blob.pos_tags[2][0], 'a')
@attr('slow')
def test_np_extractor_defaults_to_fast_tagger(self):
text = "Python is a high-level scripting language."
blob1 = tb.TextBlob(text)
assert_true(isinstance(blob1.np_extractor, FastNPExtractor))
def test_np_extractor_is_shared_among_instances(self):
blob1 = tb.TextBlob("This is one sentence")
blob2 = tb.TextBlob("This is another sentence")
assert_true(blob1.np_extractor is blob2.np_extractor)
@attr('slow')
def test_can_use_different_np_extractors(self):
e = ConllExtractor()
text = "Python is a high-level scripting language."
blob = tb.TextBlob(text)
blob.np_extractor = e
assert_true(isinstance(blob.np_extractor, ConllExtractor))
def test_can_use_different_sentanalyzer(self):
blob = tb.TextBlob("I love this car", analyzer=NaiveBayesAnalyzer())
assert_true(isinstance(blob.analyzer, NaiveBayesAnalyzer))
@attr("slow")
def test_discrete_sentiment(self):
blob = tb.TextBlob("I feel great today.", analyzer=NaiveBayesAnalyzer())
assert_equal(blob.sentiment[0], 'pos')
def test_can_get_subjectivity_and_polarity_with_different_analyzer(self):
blob = tb.TextBlob("I love this car.", analyzer=NaiveBayesAnalyzer())
pattern = PatternAnalyzer()
assert_equal(blob.polarity, pattern.analyze(str(blob))[0])
assert_equal(blob.subjectivity, pattern.analyze(str(blob))[1])
def test_pos_tagger_defaults_to_nltk_tagger(self):
blob = tb.TextBlob("some text")
assert_true(isinstance(blob.pos_tagger, NLTKTagger))
def test_pos_tagger_is_shared_among_instances(self):
blob1 = tb.TextBlob("This is one sentence")
blob2 = tb.TextBlob("This is another sentence.")
assert_true(blob1.pos_tagger is blob2.pos_tagger)
def test_can_use_different_pos_tagger(self):
tagger = NLTKTagger()
blob = tb.TextBlob("this is some text", pos_tagger=tagger)
assert_true(isinstance(blob.pos_tagger, NLTKTagger))
@attr('slow')
def test_can_pass_np_extractor_to_constructor(self):
e = ConllExtractor()
blob = tb.TextBlob('Hello world!', np_extractor=e)
assert_true(isinstance(blob.np_extractor, ConllExtractor))
def test_getitem(self):
blob = tb.TextBlob('lorem ipsum')
assert_equal(blob[0], 'l')
assert_equal(blob[0:5], tb.TextBlob('lorem'))
def test_upper(self):
blob = tb.TextBlob('lorem ipsum')
assert_true(is_blob(blob.upper()))
assert_equal(blob.upper(), tb.TextBlob('LOREM IPSUM'))
def test_upper_and_words(self):
blob = tb.TextBlob('beautiful is better')
assert_equal(blob.upper().words, tb.WordList(['BEAUTIFUL', 'IS', 'BETTER'
]))
def test_lower(self):
blob = tb.TextBlob('Lorem Ipsum')
assert_true(is_blob(blob.lower()))
assert_equal(blob.lower(), tb.TextBlob('lorem ipsum'))
def test_find(self):
text = 'Beautiful is better than ugly.'
blob = tb.TextBlob(text)
assert_equal(blob.find('better', 5, len(blob)), text.find('better', 5,
len(text)))
def test_rfind(self):
text = 'Beautiful is better than ugly. '
blob = tb.TextBlob(text)
assert_equal(blob.rfind('better'), text.rfind('better'))
def test_startswith(self):
blob = tb.TextBlob(self.text)
assert_true(blob.startswith('Beautiful'))
assert_true(blob.starts_with('Beautiful'))
def test_endswith(self):
blob = tb.TextBlob(self.text)
assert_true(blob.endswith('of those!'))
assert_true(blob.ends_with('of those!'))
def test_split(self):
blob = tb.TextBlob('Beautiful is better')
assert_equal(blob.split(), tb.WordList(['Beautiful', 'is', 'better']))
def test_title(self):
blob = tb.TextBlob('Beautiful is better')
assert_equal(blob.title(), tb.TextBlob('Beautiful Is Better'))
def test_format(self):
blob = tb.TextBlob('1 + 1 = {0}')
assert_equal(blob.format(1 + 1), tb.TextBlob('1 + 1 = 2'))
assert_equal('1 + 1 = {0}'.format(tb.TextBlob('2')), '1 + 1 = 2')
def test_using_indices_for_slicing(self):
blob = tb.TextBlob("Hello world. How do you do?")
sent1, sent2 = blob.sentences
assert_equal(blob[sent1.start:sent1.end], tb.TextBlob(str(sent1)))
assert_equal(blob[sent2.start:sent2.end], tb.TextBlob(str(sent2)))
def test_indices_with_only_one_sentences(self):
blob = tb.TextBlob("Hello world.")
sent1 = blob.sentences[0]
assert_equal(blob[sent1.start:sent1.end], tb.TextBlob(str(sent1)))
def test_indices_with_multiple_punctuations(self):
blob = tb.TextBlob("Hello world. How do you do?! This has an ellipses...")
sent1, sent2, sent3 = blob.sentences
assert_equal(blob[sent2.start:sent2.end], tb.TextBlob("How do you do?!"))
assert_equal(blob[sent3.start:sent3.end], tb.TextBlob("This has an ellipses..."))
def test_indices_short_names(self):
blob = tb.TextBlob(self.text)
last_sentence = blob.sentences[len(blob.sentences) - 1]
assert_equal(last_sentence.start, last_sentence.start_index)
assert_equal(last_sentence.end, last_sentence.end_index)
def test_replace(self):
blob = tb.TextBlob('textblob is a blobby blob')
assert_equal(blob.replace('blob', 'bro'),
tb.TextBlob('textbro is a broby bro'))
assert_equal(blob.replace('blob', 'bro', 1),
tb.TextBlob('textbro is a blobby blob'))
def test_join(self):
l = ['explicit', 'is', 'better']
wl = tb.WordList(l)
assert_equal(tb.TextBlob(' ').join(l), tb.TextBlob('explicit is better'))
assert_equal(tb.TextBlob(' ').join(wl), tb.TextBlob('explicit is better'))
@attr('slow')
def test_blob_noun_phrases(self):
noun_phrases = self.np_test_blob.noun_phrases
assert_true('python' in noun_phrases)
assert_true('design philosophy' in noun_phrases)
def test_word_counts(self):
blob = tb.TextBlob('Buffalo buffalo ate my blue buffalo.')
assert_equal(dict(blob.word_counts), {
'buffalo': 3,
'ate': 1,
'my': 1,
'blue': 1
})
assert_equal(blob.word_counts['buffalo'], 3)
assert_equal(blob.words.count('buffalo'), 3)
assert_equal(blob.words.count('buffalo', case_sensitive=True), 2)
assert_equal(blob.word_counts['blue'], 1)
assert_equal(blob.words.count('blue'), 1)
assert_equal(blob.word_counts['ate'], 1)
assert_equal(blob.words.count('ate'), 1)
assert_equal(blob.word_counts['buff'], 0)
assert_equal(blob.words.count('buff'), 0)
blob2 = tb.TextBlob(self.text)
assert_equal(blob2.words.count('special'), 2)
assert_equal(blob2.words.count('special', case_sensitive=True), 1)
@attr('slow')
def test_np_counts(self):
# Add some text so that we have a noun phrase that
# has a frequency greater than 1
noun_phrases = self.np_test_blob.noun_phrases
assert_equal(noun_phrases.count('python'), 6)
assert_equal(self.np_test_blob.np_counts['python'], noun_phrases.count('python'))
assert_equal(noun_phrases.count('cpython'), 2)
assert_equal(noun_phrases.count('not found'), 0)
def test_add(self):
blob1 = tb.TextBlob('Hello, world! ')
blob2 = tb.TextBlob('Hola mundo!')
# Can add two text blobs
assert_equal(blob1 + blob2, tb.TextBlob('Hello, world! Hola mundo!'))
# Can also add a string to a tb.TextBlob
assert_equal(blob1 + 'Hola mundo!',
tb.TextBlob('Hello, world! Hola mundo!'))
# Or both
assert_equal(blob1 + blob2 + ' Goodbye!',
tb.TextBlob('Hello, world! Hola mundo! Goodbye!'))
# operands must be strings
assert_raises(TypeError, blob1.__add__, ['hello'])
def test_unicode(self):
blob = tb.TextBlob(self.text)
assert_equal(str(blob), str(self.text))
def test_strip(self):
text = 'Beautiful is better than ugly. '
blob = tb.TextBlob(text)
assert_true(is_blob(blob))
assert_equal(blob.strip(), tb.TextBlob(text.strip()))
def test_strip_and_words(self):
blob = tb.TextBlob('Beautiful is better! ')
assert_equal(blob.strip().words, tb.WordList(['Beautiful', 'is', 'better'
]))
def test_index(self):
blob = tb.TextBlob(self.text)
assert_equal(blob.index('Namespaces'), self.text.index('Namespaces'))
def test_sentences_after_concatenation(self):
blob1 = tb.TextBlob('Beautiful is better than ugly. ')
blob2 = tb.TextBlob('Explicit is better than implicit.')
concatenated = blob1 + blob2
assert_equal(len(concatenated.sentences), 2)
def test_sentiment(self):
positive = tb.TextBlob('This is the best, most amazing '
'text-processing library ever!')
assert_true(positive.sentiment[0] > 0.0)
negative = tb.TextBlob("bad bad bitches that's my muthufuckin problem.")
assert_true(negative.sentiment[0] < 0.0)
zen = tb.TextBlob(self.text)
assert_equal(round(zen.sentiment[0], 1), 0.2)
def test_subjectivity(self):
positive = tb.TextBlob("Oh my god this is so amazing! I'm so happy!")
assert_true(isinstance(positive.subjectivity, float))
assert_true(positive.subjectivity > 0)
def test_polarity(self):
positive = tb.TextBlob("Oh my god this is so amazing! I'm so happy!")
assert_true(isinstance(positive.polarity, float))
assert_true(positive.polarity > 0)
def test_sentiment_of_emoticons(self):
b1 = tb.TextBlob("Faces have values =)")
b2 = tb.TextBlob("Faces have values")
assert_true(b1.sentiment[0] > b2.sentiment[0])
def test_bad_init(self):
assert_raises(TypeError, lambda: tb.TextBlob(['bad']))
assert_raises(ValueError, lambda: tb.TextBlob("this is fine",
np_extractor="this is not fine"))
assert_raises(ValueError, lambda: tb.TextBlob("this is fine",
pos_tagger="this is not fine"))
def test_in(self):
blob = tb.TextBlob('Beautiful is better than ugly. ')
assert_true('better' in blob)
assert_true('fugly' not in blob)
@attr('slow')
def test_json(self):
blob = tb.TextBlob('Beautiful is better than ugly. ')
assert_equal(blob.json, blob.to_json())
blob_dict = json.loads(blob.json)[0]
assert_equal(blob_dict['stripped'], 'beautiful is better than ugly')
assert_equal(blob_dict['noun_phrases'], blob.sentences[0].noun_phrases)
assert_equal(blob_dict['start_index'], blob.sentences[0].start)
assert_equal(blob_dict['end_index'], blob.sentences[0].end)
assert_almost_equal(blob_dict['polarity'],
blob.sentences[0].polarity, places=4)
assert_almost_equal(blob_dict['subjectivity'],
blob.sentences[0].subjectivity, places=4)
def test_words_are_word_objects(self):
words = self.blob.words
assert_true(isinstance(words[0], tb.Word))
def test_words_have_pos_tags(self):
blob = tb.TextBlob('Simple is better than complex. '
'Complex is better than complicated.')
first_word, first_tag = blob.pos_tags[0]
assert_true(isinstance(first_word, tb.Word))
assert_equal(first_word.pos_tag, first_tag)
def test_tokenizer_defaults_to_word_tokenizer(self):
assert_true(isinstance(self.blob.tokenizer, WordTokenizer))
def test_tokens_property(self):
assert_true(self.blob.tokens,
tb.WordList(WordTokenizer().tokenize(self.text)))
def test_can_use_a_different_tokenizer(self):
tokenizer = nltk.tokenize.TabTokenizer()
blob = tb.TextBlob("This is\ttext.", tokenizer=tokenizer)
assert_equal(blob.tokens, tb.WordList(["This is", "text."]))
def test_tokenize_method(self):
tokenizer = nltk.tokenize.TabTokenizer()
blob = tb.TextBlob("This is\ttext.")
# If called without arguments, should default to WordTokenizer
assert_equal(blob.tokenize(), tb.WordList(["This", "is", "text", "."]))
# Pass in the TabTokenizer
assert_equal(blob.tokenize(tokenizer), tb.WordList(["This is", "text."]))
def test_tags_uses_custom_tokenizer(self):
tokenizer = nltk.tokenize.regexp.WordPunctTokenizer()
blob = tb.TextBlob("Good muffins cost $3.88\nin New York.", tokenizer=tokenizer)
assert_equal(blob.tags, [(u'Good', u'JJ'), (u'muffins', u'NNS'), (u'cost', u'VBP'), (
u'3', u'CD'), (u'88', u'CD'), (u'in', u'IN'), (u'New', u'NNP'), (u'York', u'NNP')])
def test_tags_with_custom_tokenizer_and_tagger(self):
tokenizer = nltk.tokenize.regexp.WordPunctTokenizer()
tagger = tb.taggers.PatternTagger()
blob = tb.TextBlob("Good muffins cost $3.88\nin New York.", tokenizer=tokenizer, pos_tagger=tagger)
# PatternTagger takes raw text (not tokens), and handles tokenization itself.
assert_equal(blob.tags, [(u'Good', u'JJ'), (u'muffins', u'NNS'), (u'cost', u'NN'),
(u'3.88', u'CD'), (u'in', u'IN'), (u'New', u'NNP'), (u'York', u'NNP')])
@mock.patch('textblob.translate.Translator.translate')
def test_translate(self, mock_translate):
mock_translate.return_value = 'Esta es una frase.'
blob = tb.TextBlob("This is a sentence.")
translated = blob.translate(to="es")
assert_true(isinstance(translated, tb.TextBlob))
assert_equal(translated, "Esta es una frase.")
mock_translate.return_value = 'This is a sentence.'
es_blob = tb.TextBlob("Esta es una frase.")
to_en = es_blob.translate(from_lang="es", to="en")
assert_equal(to_en, "This is a sentence.")
@mock.patch('textblob.translate.Translator.detect')
def test_detect(self, mock_detect):
mock_detect.return_value = 'es'
es_blob = tb.TextBlob("Hola")
assert_equal(es_blob.detect_language(), "es")
assert_true(mock_detect.called_once_with('Hola'))
def test_correct(self):
blob = tb.TextBlob("I havv bad speling.")
assert_true(isinstance(blob.correct(), tb.TextBlob))
assert_equal(blob.correct(), tb.TextBlob("I have bad spelling."))
blob2 = tb.TextBlob("I am so exciited!!!")
assert_equal(blob2.correct(), "I am so excited!!!")
blob3 = tb.TextBlob("The meaning of life is 42.0.")
assert_equal(blob3.correct(), "The meaning of life is 42.0.")
blob4 = tb.TextBlob("?")
assert_equal(blob4.correct(), "?")
blob5 = tb.TextBlob("I can't spel")
assert_equal(blob5.correct(), "I can't spell")
blob6 = tb.TextBlob("I cann't \nspel")
assert_equal(blob6.correct(), "I can't \nspell")
# From a user-submitted bug
text = "Before you embark on any of this journey, write a quick " + \
"high-level test that demonstrates the slowness. " + \
"You may need to introduce some minimum set of data to " + \
"reproduce a significant enough slowness."
blob5 = tb.TextBlob(text)
assert_equal(blob5.correct(), text)
text = "Word list! :\n" + \
"\t* spelling\n" + \
"\t* well"
blob6 = tb.TextBlob(text)
assert_equal(blob6.correct(), text)
def test_parse(self):
blob = tb.TextBlob("And now for something completely different.")
assert_equal(blob.parse(), PatternParser().parse(blob.string))
def test_passing_bad_init_params(self):
tagger = PatternTagger()
assert_raises(ValueError,
lambda: tb.TextBlob("blah", parser=tagger))
assert_raises(ValueError,
lambda: tb.TextBlob("blah", np_extractor=tagger))
assert_raises(ValueError,
lambda: tb.TextBlob("blah", tokenizer=tagger))
assert_raises(ValueError,
lambda: tb.TextBlob("blah", analyzer=tagger))
analyzer = PatternAnalyzer
assert_raises(ValueError,
lambda: tb.TextBlob("blah", pos_tagger=analyzer))
def test_classify(self):
blob = tb.TextBlob("This is an amazing library. What an awesome classifier!",
classifier=classifier)
assert_equal(blob.classify(), 'pos')
for s in blob.sentences:
assert_equal(s.classify(), 'pos')
def test_classify_without_classifier(self):
blob = tb.TextBlob("This isn't gonna be good")
assert_raises(NameError,
lambda: blob.classify())
def test_word_string_type_after_pos_tags_is_str(self):
text = 'John is a cat'
blob = tb.TextBlob(text)
for word, part_of_speech in blob.pos_tags:
assert type(word.string) is unicode
class WordTest(TestCase):
def setUp(self):
self.cat = tb.Word('cat')
self.cats = tb.Word('cats')
def test_init(self):
tb.Word("cat")
assert_true(isinstance(self.cat, tb.Word))
word = tb.Word('cat', 'NN')
assert_equal(word.pos_tag, 'NN')
def test_singularize(self):
singular = self.cats.singularize()
assert_equal(singular, 'cat')
assert_equal(self.cat.singularize(), 'cat')
assert_true(isinstance(self.cat.singularize(), tb.Word))
def test_pluralize(self):
plural = self.cat.pluralize()
assert_equal(self.cat.pluralize(), 'cats')
assert_true(isinstance(plural, tb.Word))
def test_repr(self):
assert_equal(repr(self.cat), repr("cat"))
def test_str(self):
assert_equal(str(self.cat), 'cat')
def test_has_str_methods(self):
assert_equal(self.cat.upper(), "CAT")
assert_equal(self.cat.lower(), "cat")
assert_equal(self.cat[0:2], 'ca')
@mock.patch('textblob.translate.Translator.translate')
def test_translate(self, mock_translate):
mock_translate.return_value = 'gato'
assert_equal(tb.Word("cat").translate(to="es"), "gato")
@mock.patch('textblob.translate.Translator.translate')
def test_translate_without_from_lang(self, mock_translate):
mock_translate.return_value = 'hi'
assert_equal(tb.Word('hola').translate(), 'hi')
@mock.patch('textblob.translate.Translator.detect')
def test_detect_language(self, mock_detect):
mock_detect.return_value = 'fr'
assert_equal(tb.Word("bonjour").detect_language(), 'fr')
def test_spellcheck(self):
blob = tb.Word("speling")
suggestions = blob.spellcheck()
assert_equal(suggestions[0][0], "spelling")
def test_spellcheck_special_cases(self):
# Punctuation
assert_equal(tb.Word("!").spellcheck(), [("!", 1.0)])
# Numbers
assert_equal(tb.Word("42").spellcheck(), [("42", 1.0)])
assert_equal(tb.Word("12.34").spellcheck(), [("12.34", 1.0)])
# One-letter words
assert_equal(tb.Word("I").spellcheck(), [("I", 1.0)])
assert_equal(tb.Word("A").spellcheck(), [("A", 1.0)])
assert_equal(tb.Word("a").spellcheck(), [("a", 1.0)])
def test_correct(self):
w = tb.Word('speling')
correct = w.correct()
assert_equal(correct, tb.Word('spelling'))
assert_true(isinstance(correct, tb.Word))
@attr('slow')
def test_lemmatize(self):
w = tb.Word("cars")
assert_equal(w.lemmatize(), "car")
w = tb.Word("wolves")
assert_equal(w.lemmatize(), "wolf")
w = tb.Word("went")
assert_equal(w.lemmatize("v"), "go") # wordnet tagset
assert_equal(w.lemmatize("VBD"), "go") # penn treebank tagset
def test_lemma(self):
w = tb.Word("wolves")
assert_equal(w.lemma, "wolf")
w = tb.Word("went", "VBD");
assert_equal(w.lemma, "go")
def test_stem(self):  # only the PorterStemmer is tested
w = tb.Word("cars")
assert_equal(w.stem(), "car")
w = tb.Word("wolves")
assert_equal(w.stem(), "wolv")
w = tb.Word("went")
assert_equal(w.stem(), "went")
def test_synsets(self):
w = tb.Word("car")
assert_true(isinstance(w.synsets, (list, tuple)))
assert_true(isinstance(w.synsets[0], Synset))
def test_synsets_with_pos_argument(self):
w = tb.Word("work")
noun_syns = w.get_synsets(pos=wn.NOUN)
for synset in noun_syns:
assert_equal(synset.pos(), wn.NOUN)
def test_definitions(self):
w = tb.Word("octopus")
for definition in w.definitions:
print(type(definition))
assert_true(isinstance(definition, basestring))
def test_define(self):
w = tb.Word("hack")
synsets = w.get_synsets(wn.NOUN)
definitions = w.define(wn.NOUN)
assert_equal(len(synsets), len(definitions))
class TestWordnetInterface(TestCase):
def setUp(self):
pass
def test_synset(self):
syn = wn.Synset("dog.n.01")
word = tb.Word("dog")
assert_equal(word.synsets[0], syn)
def test_lemma(self):
lemma = wn.Lemma('eat.v.01.eat')
word = tb.Word("eat")
assert_equal(word.synsets[0].lemmas()[0], lemma)
class BlobberTest(TestCase):
def setUp(self):
self.blobber = tb.Blobber() # The default blobber
def test_creates_blobs(self):
blob1 = self.blobber("this is one blob")
assert_true(isinstance(blob1, tb.TextBlob))
blob2 = self.blobber("another blob")
assert_equal(blob1.pos_tagger, blob2.pos_tagger)
def test_default_tagger(self):
blob = self.blobber("Some text")
assert_true(isinstance(blob.pos_tagger, NLTKTagger))
def test_default_np_extractor(self):
blob = self.blobber("Some text")
assert_true(isinstance(blob.np_extractor, FastNPExtractor))
def test_default_tokenizer(self):
blob = self.blobber("Some text")
assert_true(isinstance(blob.tokenizer, WordTokenizer))
def test_str_and_repr(self):
expected = "Blobber(tokenizer=WordTokenizer(), pos_tagger=NLTKTagger(), np_extractor=FastNPExtractor(), analyzer=PatternAnalyzer(), parser=PatternParser(), classifier=None)"
assert_equal(repr(self.blobber), expected)
assert_equal(str(self.blobber), repr(self.blobber))
def test_overrides(self):
b = tb.Blobber(tokenizer=SentenceTokenizer(),
np_extractor=ConllExtractor())
blob = b("How now? Brown cow?")
assert_true(isinstance(blob.tokenizer, SentenceTokenizer))
assert_equal(blob.tokens, tb.WordList(["How now?", "Brown cow?"]))
blob2 = b("Another blob")
# blobs have the same tokenizer
assert_true(blob.tokenizer is blob2.tokenizer)
# but aren't the same object
assert_not_equal(blob, blob2)
def test_override_analyzer(self):
b = tb.Blobber(analyzer=NaiveBayesAnalyzer())
blob = b("How now?")
blob2 = b("Brown cow")
assert_true(isinstance(blob.analyzer, NaiveBayesAnalyzer))
assert_true(blob.analyzer is blob2.analyzer)
def test_override_classifier(self):
b = tb.Blobber(classifier=classifier)
blob = b("I am so amazing")
assert_equal(blob.classify(), 'pos')
def is_blob(obj):
return isinstance(obj, tb.TextBlob)
if __name__ == '__main__':
main()
| sloria/TextBlob | tests/test_blob.py | Python | mit | 41,699 | ["Octopus"] | 0eef10ba4c2c10d598d5ac987c1907cd53ca03a724b7e863062cb78b3c82dee1 |
# -*- coding: utf-8 -*-
"""
Unit tests for instructor.api methods.
"""
# pylint: disable=E1111
import unittest
import json
import requests
import datetime
from urllib import quote
from django.test import TestCase
from nose.tools import raises
from mock import Mock, patch
from django.conf import settings
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.http import HttpRequest, HttpResponse
from django_comment_common.models import FORUM_ROLE_COMMUNITY_TA, Role
from django_comment_common.utils import seed_permissions_roles
from django.core import mail
from django.utils.timezone import utc
from django.contrib.auth.models import User
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from courseware.tests.helpers import LoginEnrollmentTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from student.tests.factories import UserFactory
from courseware.tests.factories import StaffFactory, InstructorFactory, BetaTesterFactory
from student.roles import CourseBetaTesterRole
from student.models import CourseEnrollment, CourseEnrollmentAllowed
from courseware.models import StudentModule
# modules which are mocked in test cases.
import instructor_task.api
from instructor.access import allow_access
import instructor.views.api
from instructor.views.api import _split_input_list, _msk_from_problem_urlname, common_exceptions_400
from instructor_task.api_helper import AlreadyRunningError
from .test_tools import get_extended_due
@common_exceptions_400
def view_success(request): # pylint: disable=W0613
"A dummy view for testing that returns a simple HTTP response"
return HttpResponse('success')
@common_exceptions_400
def view_user_doesnotexist(request): # pylint: disable=W0613
"A dummy view that raises a User.DoesNotExist exception"
raise User.DoesNotExist()
@common_exceptions_400
def view_alreadyrunningerror(request): # pylint: disable=W0613
"A dummy view that raises an AlreadyRunningError exception"
raise AlreadyRunningError()
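# Behavioural sketch of common_exceptions_400 (inferred from the tests below,
# not its actual source): it wraps a view and turns User.DoesNotExist and
# AlreadyRunningError into HTTP 400 responses, returning a JSON body with an
# "error" key when request.is_ajax() is true and plain text otherwise.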
class TestCommonExceptions400(unittest.TestCase):
"""
Testing the common_exceptions_400 decorator.
"""
def setUp(self):
self.request = Mock(spec=HttpRequest)
self.request.META = {}
def test_happy_path(self):
resp = view_success(self.request)
self.assertEqual(resp.status_code, 200)
def test_user_doesnotexist(self):
self.request.is_ajax.return_value = False
resp = view_user_doesnotexist(self.request)
self.assertEqual(resp.status_code, 400)
self.assertIn("User does not exist", resp.content)
def test_user_doesnotexist_ajax(self):
self.request.is_ajax.return_value = True
resp = view_user_doesnotexist(self.request)
self.assertEqual(resp.status_code, 400)
result = json.loads(resp.content)
self.assertIn("User does not exist", result["error"])
def test_alreadyrunningerror(self):
self.request.is_ajax.return_value = False
resp = view_alreadyrunningerror(self.request)
self.assertEqual(resp.status_code, 400)
self.assertIn("Task is already running", resp.content)
def test_alreadyrunningerror_ajax(self):
self.request.is_ajax.return_value = True
resp = view_alreadyrunningerror(self.request)
self.assertEqual(resp.status_code, 400)
result = json.loads(resp.content)
self.assertIn("Task is already running", result["error"])
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
class TestInstructorAPIDenyLevels(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Ensure that users cannot access endpoints they shouldn't be able to.
"""
def setUp(self):
self.course = CourseFactory.create()
self.user = UserFactory.create()
CourseEnrollment.enroll(self.user, self.course.id)
self.problem_urlname = 'robot-some-problem-urlname'
_module = StudentModule.objects.create(
student=self.user,
course_id=self.course.id,
module_state_key=_msk_from_problem_urlname(
self.course.id,
self.problem_urlname
),
state=json.dumps({'attempts': 10}),
)
# Endpoints that only Staff or Instructors can access
self.staff_level_endpoints = [
('students_update_enrollment', {'identifiers': 'foo@example.org', 'action': 'enroll'}),
('get_grading_config', {}),
('get_students_features', {}),
('get_distribution', {}),
('get_student_progress_url', {'unique_student_identifier': self.user.username}),
('reset_student_attempts', {'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
('update_forum_role_membership', {'unique_student_identifier': self.user.email, 'rolename': 'Moderator', 'action': 'allow'}),
('list_forum_members', {'rolename': FORUM_ROLE_COMMUNITY_TA}),
('proxy_legacy_analytics', {'aname': 'ProblemGradeDistribution'}),
('send_email', {'send_to': 'staff', 'subject': 'test', 'message': 'asdf'}),
('list_instructor_tasks', {}),
('list_background_email_tasks', {}),
('list_report_downloads', {}),
('calculate_grades_csv', {}),
]
# Endpoints that only Instructors can access
self.instructor_level_endpoints = [
('bulk_beta_modify_access', {'identifiers': 'foo@example.org', 'action': 'add'}),
('modify_access', {'unique_student_identifier': self.user.email, 'rolename': 'beta', 'action': 'allow'}),
('list_course_role_members', {'rolename': 'beta'}),
('rescore_problem', {'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
]
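# Note: the two lists above encode the permission tiers exercised below;
# staff_level_endpoints must be reachable by staff and instructors (HTTP 200),
# while instructor_level_endpoints must return 403 for anyone below instructor.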
def _access_endpoint(self, endpoint, args, status_code, msg):
"""
Asserts that accessing the given `endpoint` gets a response of `status_code`.
endpoint: string, endpoint for instructor dash API
args: dict, kwargs for `reverse` call
status_code: expected HTTP status code response
msg: message to display if assertion fails.
"""
url = reverse(endpoint, kwargs={'course_id': self.course.id})
if endpoint == 'send_email':
response = self.client.post(url, args)
else:
response = self.client.get(url, args)
print endpoint
print response
self.assertEqual(
response.status_code,
status_code,
msg=msg
)
def test_student_level(self):
"""
Ensure that an enrolled student can't access staff or instructor endpoints.
"""
self.client.login(username=self.user.username, password='test')
for endpoint, args in self.staff_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Student should not be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Student should not be allowed to access endpoint " + endpoint
)
def test_staff_level(self):
"""
Ensure that a staff member can't access instructor endpoints.
"""
staff_member = StaffFactory(course=self.course.location)
CourseEnrollment.enroll(staff_member, self.course.id)
self.client.login(username=staff_member.username, password='test')
# Try to promote to forums admin - not working
# update_forum_role(self.course.id, staff_member, FORUM_ROLE_ADMINISTRATOR, 'allow')
for endpoint, args in self.staff_level_endpoints:
# TODO: make these work
if endpoint in ['update_forum_role_membership', 'proxy_legacy_analytics', 'list_forum_members']:
continue
self._access_endpoint(
endpoint,
args,
200,
"Staff member should be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Staff member should not be allowed to access endpoint " + endpoint
)
def test_instructor_level(self):
"""
Ensure that an instructor member can access all endpoints.
"""
inst = InstructorFactory(course=self.course.location)
CourseEnrollment.enroll(inst, self.course.id)
self.client.login(username=inst.username, password='test')
for endpoint, args in self.staff_level_endpoints:
# TODO: make these work
if endpoint in ['update_forum_role_membership', 'proxy_legacy_analytics']:
continue
self._access_endpoint(
endpoint,
args,
200,
"Instructor should be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
# TODO: make this work
if endpoint in ['rescore_problem']:
continue
self._access_endpoint(
endpoint,
args,
200,
"Instructor should be allowed to access endpoint " + endpoint
)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPIEnrollment(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test enrollment modification endpoint.
This test does NOT exhaustively test state changes, that is the
job of test_enrollment. This tests the response and action switch.
"""
def setUp(self):
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course=self.course.location)
self.client.login(username=self.instructor.username, password='test')
self.enrolled_student = UserFactory(username='EnrolledStudent', first_name='Enrolled', last_name='Student')
CourseEnrollment.enroll(
self.enrolled_student,
self.course.id
)
self.notenrolled_student = UserFactory(username='NotEnrolledStudent', first_name='NotEnrolled', last_name='Student')
# Create invited, but not registered, user
cea = CourseEnrollmentAllowed(email='robot-allowed@robot.org', course_id=self.course.id)
cea.save()
self.allowed_email = 'robot-allowed@robot.org'
self.notregistered_email = 'robot-not-an-email-yet@robot.org'
self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)
# uncomment to enable printing of large diffs
# from failed assertions in the event of a test failure.
# (commented out because of pylint C0103)
# self.maxDiff = None
def test_missing_params(self):
""" Test missing all query parameters. """
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_bad_action(self):
""" Test with an invalid action. """
action = 'robot-not-an-action'
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'identifiers': self.enrolled_student.email, 'action': action})
self.assertEqual(response.status_code, 400)
def test_invalid_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'identifiers': 'percivaloctavius@', 'action': 'enroll', 'email_students': False})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": 'percivaloctavius@',
"invalidIdentifier": True,
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_invalid_username(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'identifiers': 'percivaloctavius', 'action': 'enroll', 'email_students': False})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": 'percivaloctavius',
"invalidIdentifier": True,
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_enroll_with_username(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'identifiers': self.notenrolled_student.username, 'action': 'enroll', 'email_students': False})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": self.notenrolled_student.username,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_enroll_without_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'identifiers': self.notenrolled_student.email, 'action': 'enroll', 'email_students': False})
print "type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now enrolled
user = User.objects.get(email=self.notenrolled_student.email)
self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notenrolled_student.email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_enroll_with_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'identifiers': self.notenrolled_student.email, 'action': 'enroll', 'email_students': True})
print "type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now enrolled
user = User.objects.get(email=self.notenrolled_student.email)
self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notenrolled_student.email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been enrolled in Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear NotEnrolled Student\n\nYou have been enrolled in Robot Super Course "
"at edx.org by a member of the course staff. "
"The course should now appear on your edx.org dashboard.\n\n"
"To start accessing course materials, please visit "
"https://edx.org/courses/MITx/999/Robot_Super_Course/\n\n----\n"
"This email was automatically sent from edx.org to NotEnrolled Student"
)
def test_enroll_with_email_not_registered(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True})
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to register for Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n"
"To finish your registration, please visit https://edx.org/register and fill out the registration form "
"making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
"Once you have registered and activated your account, "
"visit https://edx.org/courses/MITx/999/Robot_Super_Course/about to join the course.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org"
)
def test_enroll_email_not_registered_mktgsite(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
# Try with marketing site enabled
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
response = self.client.get(url, {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True})
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n"
"To finish your registration, please visit https://edx.org/register and fill out the registration form "
"making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
"You can then enroll in Robot Super Course.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org"
)
def test_enroll_with_email_not_registered_autoenroll(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True, 'auto_enroll': True})
print "type(self.notregistered_email): {}".format(type(self.notregistered_email))
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to register for Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n"
"To finish your registration, please visit https://edx.org/register and fill out the registration form "
"making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
"Once you have registered and activated your account, you will see Robot Super Course listed on your dashboard.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org"
)
def test_unenroll_without_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll', 'email_students': False})
print "type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now unenrolled
user = User.objects.get(email=self.enrolled_student.email)
self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.enrolled_student.email,
"before": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_unenroll_with_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll', 'email_students': True})
print "type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now unenrolled
user = User.objects.get(email=self.enrolled_student.email)
self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.enrolled_student.email,
"before": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been un-enrolled from Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear Enrolled Student\n\nYou have been un-enrolled in Robot Super Course "
"at edx.org by a member of the course staff. "
"The course will no longer appear on your edx.org dashboard.\n\n"
"Your other courses have not been affected.\n\n----\n"
"This email was automatically sent from edx.org to Enrolled Student"
)
def test_unenroll_with_email_allowed_student(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'identifiers': self.allowed_email, 'action': 'unenroll', 'email_students': True})
print "type(self.allowed_email): {}".format(type(self.allowed_email))
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.allowed_email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": True,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": False,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been un-enrolled from Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear Student,\n\nYou have been un-enrolled from course Robot Super Course by a member of the course staff. "
"Please disregard the invitation previously sent.\n\n----\n"
"This email was automatically sent from edx.org to robot-allowed@robot.org"
)
@patch('instructor.enrollment.uses_shib')
def test_enroll_with_email_not_registered_with_shib(self, mock_uses_shib):
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True})
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to register for Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n"
"To access the course visit https://edx.org/courses/MITx/999/Robot_Super_Course/about and register for the course.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org"
)
@patch('instructor.enrollment.uses_shib')
def test_enroll_email_not_registered_shib_mktgsite(self, mock_uses_shib):
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
# Try with marketing site enabled
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
response = self.client.get(url, {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True})
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org"
)
@patch('instructor.enrollment.uses_shib')
def test_enroll_with_email_not_registered_with_shib_autoenroll(self, mock_uses_shib):
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True, 'auto_enroll': True})
print "type(self.notregistered_email): {}".format(type(self.notregistered_email))
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to register for Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n"
"To access the course visit https://edx.org/courses/MITx/999/Robot_Super_Course/ and login.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org"
)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPIBulkBetaEnrollment(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test bulk beta modify access endpoint.
"""
def setUp(self):
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course=self.course.location)
self.client.login(username=self.instructor.username, password='test')
self.beta_tester = BetaTesterFactory(course=self.course.location)
CourseEnrollment.enroll(
self.beta_tester,
self.course.id
)
self.notenrolled_student = UserFactory(username='NotEnrolledStudent')
self.notregistered_email = 'robot-not-an-email-yet@robot.org'
self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)
# uncomment to enable printing of large diffs
# from failed assertions in the event of a test failure.
# (commented out because pylint C0103)
# self.maxDiff = None
def test_missing_params(self):
""" Test missing all query parameters. """
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_bad_action(self):
""" Test with an invalid action. """
action = 'robot-not-an-action'
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'identifiers': self.beta_tester.email, 'action': action})
self.assertEqual(response.status_code, 400)
def add_notenrolled(self, response, identifier):
"""
Test Helper Method (not a test, called by other tests)
Takes a client response from a call to bulk_beta_modify_access with 'email_students': False,
and the student identifier (email or username) given as 'identifiers' in the request.
Asserts the response returns cleanly, that the student was added as a beta tester, and the
response properly contains their identifier, 'error': False, and 'userDoesNotExist': False.
Additionally asserts no email was sent.
"""
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.location).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": identifier,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_add_notenrolled_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False})
self.add_notenrolled(response, self.notenrolled_student.email)
self.assertFalse(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_email_autoenroll(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False, 'auto_enroll': True})
self.add_notenrolled(response, self.notenrolled_student.email)
self.assertTrue(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_username(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False})
self.add_notenrolled(response, self.notenrolled_student.username)
self.assertFalse(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_username_autoenroll(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False, 'auto_enroll': True})
self.add_notenrolled(response, self.notenrolled_student.username)
self.assertTrue(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_with_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True})
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.location).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notenrolled_student.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to a beta test for Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
u"Dear {0}\n\nYou have been invited to be a beta tester "
"for Robot Super Course at edx.org by a member of the course staff.\n\n"
"Visit https://edx.org/courses/MITx/999/Robot_Super_Course/about to join "
"the course and begin the beta test.\n\n----\n"
"This email was automatically sent from edx.org to {1}".format(
self.notenrolled_student.profile.name,
self.notenrolled_student.email
)
)
def test_add_notenrolled_with_email_autoenroll(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(
url,
{'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True, 'auto_enroll': True}
)
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.location).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notenrolled_student.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to a beta test for Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
u"Dear {0}\n\nYou have been invited to be a beta tester "
"for Robot Super Course at edx.org by a member of the course staff.\n\n"
"To start accessing course materials, please visit "
"https://edx.org/courses/MITx/999/Robot_Super_Course/\n\n----\n"
"This email was automatically sent from edx.org to {1}".format(
self.notenrolled_student.profile.name,
self.notenrolled_student.email
)
)
def test_add_notenrolled_email_mktgsite(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id})
# Try with marketing site enabled
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
response = self.client.get(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True})
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
u"Dear {0}\n\nYou have been invited to be a beta tester "
"for Robot Super Course at edx.org by a member of the course staff.\n\n"
"Visit edx.org to enroll in the course and begin the beta test.\n\n----\n"
"This email was automatically sent from edx.org to {1}".format(
self.notenrolled_student.profile.name,
self.notenrolled_student.email
)
)
def test_enroll_with_email_not_registered(self):
# User doesn't exist
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'identifiers': self.notregistered_email, 'action': 'add', 'email_students': True})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notregistered_email,
"error": True,
"userDoesNotExist": True
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_remove_without_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': False})
self.assertEqual(response.status_code, 200)
self.assertFalse(CourseBetaTesterRole(self.course.location).has_user(self.beta_tester))
# test the response data
expected = {
"action": "remove",
"results": [
{
"identifier": self.beta_tester.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_remove_with_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': True})
self.assertEqual(response.status_code, 200)
self.assertFalse(CourseBetaTesterRole(self.course.location).has_user(self.beta_tester))
# test the response data
expected = {
"action": "remove",
"results": [
{
"identifier": self.beta_tester.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been removed from a beta test for Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear {full_name}\n\nYou have been removed as a beta tester for "
"Robot Super Course at edx.org by a member of the course staff. "
"The course will remain on your dashboard, but you will no longer "
"be part of the beta testing group.\n\n"
"Your other courses have not been affected.\n\n----\n"
"This email was automatically sent from edx.org to {email_address}".format(
full_name=self.beta_tester.profile.name,
email_address=self.beta_tester.email
)
)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPILevelsAccess(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can change permissions
of other users.
This test does NOT test whether the actions had an effect on the
database; that is the job of test_access.
This tests the response and action switch.
Actually, modify_access does not have a very meaningful
response yet, so only the status code is tested.
"""
def setUp(self):
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course=self.course.location)
self.client.login(username=self.instructor.username, password='test')
self.other_instructor = UserFactory()
allow_access(self.course, self.other_instructor, 'instructor')
self.other_staff = UserFactory()
allow_access(self.course, self.other_staff, 'staff')
self.other_user = UserFactory()
def test_modify_access_noparams(self):
""" Test missing all query parameters. """
url = reverse('modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_modify_access_bad_action(self):
""" Test with an invalid action parameter. """
url = reverse('modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'staff',
'action': 'robot-not-an-action',
})
self.assertEqual(response.status_code, 400)
def test_modify_access_bad_role(self):
""" Test with an invalid action parameter. """
url = reverse('modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'robot-not-a-roll',
'action': 'revoke',
})
self.assertEqual(response.status_code, 400)
def test_modify_access_allow(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'unique_student_identifier': self.other_instructor.email,
'rolename': 'staff',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_allow_with_uname(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'unique_student_identifier': self.other_instructor.username,
'rolename': 'staff',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke_with_username(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.username,
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_with_fake_user(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'unique_student_identifier': 'GandalfTheGrey',
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
expected = {
'unique_student_identifier': 'GandalfTheGrey',
'userDoesNotExist': True,
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_modify_access_with_inactive_user(self):
self.other_user.is_active = False
self.other_user.save() # pylint: disable=no-member
url = reverse('modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'unique_student_identifier': self.other_user.username,
'rolename': 'beta',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
expected = {
'unique_student_identifier': self.other_user.username,
'inactiveUser': True,
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_modify_access_revoke_not_allowed(self):
""" Test revoking access that a user does not have. """
url = reverse('modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'instructor',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke_self(self):
"""
Test that an instructor cannot remove instructor privileges from themselves.
"""
url = reverse('modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'unique_student_identifier': self.instructor.email,
'rolename': 'instructor',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'unique_student_identifier': self.instructor.username,
'rolename': 'instructor',
'action': 'revoke',
'removingSelfAsInstructor': True,
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_list_course_role_members_noparams(self):
""" Test missing all query parameters. """
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_list_course_role_members_bad_rolename(self):
""" Test with an invalid rolename parameter. """
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'rolename': 'robot-not-a-rolename',
})
print response
self.assertEqual(response.status_code, 400)
def test_list_course_role_members_staff(self):
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'rolename': 'staff',
})
print response
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'course_id': self.course.id,
'staff': [
{
'username': self.other_staff.username,
'email': self.other_staff.email,
'first_name': self.other_staff.first_name,
'last_name': self.other_staff.last_name,
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_list_course_role_members_beta(self):
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'rolename': 'beta',
})
print response
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'course_id': self.course.id,
'beta': []
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_update_forum_role_membership(self):
"""
Test update forum role membership with user's email and username.
"""
# Seed forum roles for course.
seed_permissions_roles(self.course.id)
# Test add discussion admin with email.
self.assert_update_forum_role_membership(self.other_user.email, "Administrator", "allow")
# Test revoke discussion admin with email.
self.assert_update_forum_role_membership(self.other_user.email, "Administrator", "revoke")
# Test add discussion moderator with username.
self.assert_update_forum_role_membership(self.other_user.username, "Moderator", "allow")
# Test revoke discussion moderator with username.
self.assert_update_forum_role_membership(self.other_user.username, "Moderator", "revoke")
# Test add discussion community TA with email.
self.assert_update_forum_role_membership(self.other_user.email, "Community TA", "allow")
# Test revoke discussion community TA with username.
self.assert_update_forum_role_membership(self.other_user.username, "Community TA", "revoke")
def assert_update_forum_role_membership(self, unique_student_identifier, rolename, action):
"""
Test update forum role membership.
Get unique_student_identifier, rolename and action and update forum role.
"""
url = reverse('update_forum_role_membership', kwargs={'course_id': self.course.id})
response = self.client.get(
url,
{
'unique_student_identifier': unique_student_identifier,
'rolename': rolename,
'action': action,
}
)
# Status code should be 200.
self.assertEqual(response.status_code, 200)
user_roles = self.other_user.roles.filter(course_id=self.course.id).values_list("name", flat=True)
if action == 'allow':
self.assertIn(rolename, user_roles)
elif action == 'revoke':
self.assertNotIn(rolename, user_roles)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPILevelsDataDump(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints that show data without side effects.
"""
def setUp(self):
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course=self.course.location)
self.client.login(username=self.instructor.username, password='test')
self.students = [UserFactory() for _ in xrange(6)]
for student in self.students:
CourseEnrollment.enroll(student, self.course.id)
def test_get_students_features(self):
"""
Test that some minimum of information is formatted
correctly in the response to get_students_features.
"""
url = reverse('get_students_features', kwargs={'course_id': self.course.id})
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertIn('students', res_json)
for student in self.students:
student_json = [
x for x in res_json['students']
if x['username'] == student.username
][0]
self.assertEqual(student_json['username'], student.username)
self.assertEqual(student_json['email'], student.email)
def test_get_anon_ids(self):
"""
Test the CSV output for the anonymized user ids.
"""
url = reverse('get_anon_ids', kwargs={'course_id': self.course.id})
with patch('instructor.views.api.unique_id_for_user') as mock_unique:
mock_unique.return_value = '42'
response = self.client.get(url, {})
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
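# Note: the user IDs "2" and "7" below assume the instructor receives
# user ID 1 and the six students from setUp receive IDs 2-7. This depends
# on factory creation order and is an assumption, not asserted elsewhere.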
self.assertTrue(body.startswith('"User ID","Anonymized user ID"\n"2","42"\n'))
self.assertTrue(body.endswith('"7","42"\n'))
def test_list_report_downloads(self):
url = reverse('list_report_downloads', kwargs={'course_id': self.course.id})
with patch('instructor_task.models.LocalFSReportStore.links_for') as mock_links_for:
mock_links_for.return_value = [
('mock_file_name_1', 'https://1.mock.url'),
('mock_file_name_2', 'https://2.mock.url'),
]
response = self.client.get(url, {})
expected_response = {
"downloads": [
{
"url": "https://1.mock.url",
"link": "<a href=\"https://1.mock.url\">mock_file_name_1</a>",
"name": "mock_file_name_1"
},
{
"url": "https://2.mock.url",
"link": "<a href=\"https://2.mock.url\">mock_file_name_2</a>",
"name": "mock_file_name_2"
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected_response)
def test_calculate_grades_csv_success(self):
url = reverse('calculate_grades_csv', kwargs={'course_id': self.course.id})
with patch('instructor_task.api.submit_calculate_grades_csv') as mock_cal_grades:
mock_cal_grades.return_value = True
response = self.client.get(url, {})
success_status = "Your grade report is being generated! You can view the status of the generation task in the 'Pending Instructor Tasks' section."
self.assertIn(success_status, response.content)
def test_calculate_grades_csv_already_running(self):
url = reverse('calculate_grades_csv', kwargs={'course_id': self.course.id})
with patch('instructor_task.api.submit_calculate_grades_csv') as mock_cal_grades:
mock_cal_grades.side_effect = AlreadyRunningError()
response = self.client.get(url, {})
already_running_status = "A grade report generation task is already in progress. Check the 'Pending Instructor Tasks' table for the status of the task. When completed, the report will be available for download in the table below."
self.assertIn(already_running_status, response.content)
def test_get_students_features_csv(self):
"""
Test that the CSV endpoint of get_students_features responds
with the text/csv content type.
"""
url = reverse('get_students_features', kwargs={'course_id': self.course.id})
response = self.client.get(url + '/csv', {})
self.assertEqual(response['Content-Type'], 'text/csv')
def test_get_distribution_no_feature(self):
"""
Test that get_distribution lists available features
when supplied no feature parameter.
"""
url = reverse('get_distribution', kwargs={'course_id': self.course.id})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertEqual(type(res_json['available_features']), list)
url = reverse('get_distribution', kwargs={'course_id': self.course.id})
response = self.client.get(url + u'?feature=')
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertEqual(type(res_json['available_features']), list)
def test_get_distribution_unavailable_feature(self):
"""
Test that get_distribution fails gracefully with
an unavailable feature.
"""
url = reverse('get_distribution', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'feature': 'robot-not-a-real-feature'})
self.assertEqual(response.status_code, 400)
def test_get_distribution_gender(self):
"""
Test that get_distribution returns the expected gender distribution.
"""
url = reverse('get_distribution', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'feature': 'gender'})
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
print res_json
self.assertEqual(res_json['feature_results']['data']['m'], 6)
self.assertEqual(res_json['feature_results']['choices_display_names']['m'], 'Male')
self.assertEqual(res_json['feature_results']['data']['no_data'], 0)
self.assertEqual(res_json['feature_results']['choices_display_names']['no_data'], 'No Data')
def test_get_student_progress_url(self):
""" Test that progress_url is in the successful response. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id})
url += "?unique_student_identifier={}".format(
quote(self.students[0].email.encode("utf-8"))
)
print url
response = self.client.get(url)
print response
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertIn('progress_url', res_json)
def test_get_student_progress_url_from_uname(self):
""" Test that progress_url is in the successful response. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id})
url += "?unique_student_identifier={}".format(
quote(self.students[0].username.encode("utf-8"))
)
print url
response = self.client.get(url)
print response
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertIn('progress_url', res_json)
def test_get_student_progress_url_noparams(self):
""" Test that the endpoint 404's without the required query params. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_get_student_progress_url_nostudent(self):
""" Test that the endpoint 400's when requesting an unknown email. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPIRegradeTask(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can change student grades.
This includes resetting attempts and starting rescore tasks.
This test does NOT test whether the actions had an effect on the
database; that is the job of task tests and test_enrollment.
"""
def setUp(self):
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course=self.course.location)
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
self.problem_urlname = 'robot-some-problem-urlname'
self.module_to_reset = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=_msk_from_problem_urlname(
self.course.id,
self.problem_urlname
),
state=json.dumps({'attempts': 10}),
)
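# The module above starts with 10 recorded attempts so that a successful
# reset is observable as the counter dropping back to 0.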
def test_reset_student_attempts_delete_all(self):
""" Make sure no one can delete all students' state on a problem. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
'delete_module': True,
})
print response.content
self.assertEqual(response.status_code, 400)
def test_reset_student_attempts_single(self):
""" Test reset single student attempts. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
print response.content
self.assertEqual(response.status_code, 200)
# make sure problem attempts have been reset.
changed_module = StudentModule.objects.get(pk=self.module_to_reset.pk)
self.assertEqual(
json.loads(changed_module.state)['attempts'],
0
)
# mock out the function which should be called to execute the action.
@patch.object(instructor_task.api, 'submit_reset_problem_attempts_for_all_students')
def test_reset_student_attempts_all(self, act):
""" Test reset all student attempts. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
})
print response.content
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
def test_reset_student_attempts_missingmodule(self):
""" Test reset for non-existant problem. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'problem_to_reset': 'robot-not-a-real-module',
'unique_student_identifier': self.student.email,
})
print response.content
self.assertEqual(response.status_code, 400)
def test_reset_student_attempts_delete(self):
""" Test delete single student state. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
'delete_module': True,
})
print response.content
self.assertEqual(response.status_code, 200)
# make sure the module has been deleted
self.assertEqual(
StudentModule.objects.filter(
student=self.module_to_reset.student,
course_id=self.module_to_reset.course_id,
# module_state_key=self.module_to_reset.module_state_key,
).count(),
0
)
def test_reset_student_attempts_nonsense(self):
""" Test failure with both unique_student_identifier and all_students. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
'all_students': True,
})
print response.content
self.assertEqual(response.status_code, 400)
@patch.object(instructor_task.api, 'submit_rescore_problem_for_student')
def test_rescore_problem_single(self, act):
""" Test rescoring of a single student. """
url = reverse('rescore_problem', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
print response.content
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
@patch.object(instructor_task.api, 'submit_rescore_problem_for_student')
def test_rescore_problem_single_from_uname(self, act):
""" Test rescoring of a single student. """
url = reverse('rescore_problem', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.username,
})
print response.content
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
@patch.object(instructor_task.api, 'submit_rescore_problem_for_all_students')
def test_rescore_problem_all(self, act):
""" Test rescoring for all students. """
url = reverse('rescore_problem', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
})
print response.content
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
class TestInstructorSendEmail(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Checks that only instructors have access to the email endpoints, that
these endpoints are only accessible for courses that actually exist, and
only with valid email messages.
"""
def setUp(self):
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course=self.course.location)
self.client.login(username=self.instructor.username, password='test')
test_subject = u'\u1234 test subject'
test_message = u'\u6824 test message'
self.full_test_message = {
'send_to': 'staff',
'subject': test_subject,
'message': test_message,
}
def test_send_email_as_logged_in_instructor(self):
url = reverse('send_email', kwargs={'course_id': self.course.id})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 200)
def test_send_email_but_not_logged_in(self):
self.client.logout()
url = reverse('send_email', kwargs={'course_id': self.course.id})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 403)
def test_send_email_but_not_staff(self):
self.client.logout()
student = UserFactory()
self.client.login(username=student.username, password='test')
url = reverse('send_email', kwargs={'course_id': self.course.id})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 403)
def test_send_email_but_course_not_exist(self):
url = reverse('send_email', kwargs={'course_id': 'GarbageCourse/DNE/NoTerm'})
response = self.client.post(url, self.full_test_message)
self.assertNotEqual(response.status_code, 200)
def test_send_email_no_sendto(self):
url = reverse('send_email', kwargs={'course_id': self.course.id})
response = self.client.post(url, {
'subject': 'test subject',
'message': 'test message',
})
self.assertEqual(response.status_code, 400)
def test_send_email_no_subject(self):
url = reverse('send_email', kwargs={'course_id': self.course.id})
response = self.client.post(url, {
'send_to': 'staff',
'message': 'test message',
})
self.assertEqual(response.status_code, 400)
def test_send_email_no_message(self):
url = reverse('send_email', kwargs={'course_id': self.course.id})
response = self.client.post(url, {
'send_to': 'staff',
'subject': 'test subject',
})
self.assertEqual(response.status_code, 400)
class MockCompletionInfo(object):
"""Mock for get_task_completion_info"""
times_called = 0
def mock_get_task_completion_info(self, *args): # pylint: disable=unused-argument
"""Mock for get_task_completion_info"""
self.times_called += 1
if self.times_called % 2 == 0:
return True, 'Task Completed'
return False, 'Task Errored In Some Way'
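# A minimal usage sketch (hypothetical values): successive calls alternate,
# so a batch of FakeTask objects built from one MockCompletionInfo mixes
# errored and completed statuses:
#   info = MockCompletionInfo()
#   info.mock_get_task_completion_info()  # (False, 'Task Errored In Some Way')
#   info.mock_get_task_completion_info()  # (True, 'Task Completed')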
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPITaskLists(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test instructor task list endpoint.
"""
class FakeTask(object):
""" Fake task object """
FEATURES = [
'task_type',
'task_input',
'task_id',
'requester',
'task_state',
'created',
'status',
'task_message',
'duration_sec'
]
def __init__(self, completion):
for feature in self.FEATURES:
setattr(self, feature, 'expected')
# created needs to be a datetime
self.created = datetime.datetime(2013, 10, 25, 11, 42, 35)
# set 'status' and 'task_message' attrs
success, task_message = completion()
if success:
self.status = "Complete"
else:
self.status = "Incomplete"
self.task_message = task_message
# Set 'task_output' attr, which will be parsed to the 'duration_sec' attr.
self.task_output = '{"duration_ms": 1035000}'
self.duration_sec = 1035000 / 1000.0
def make_invalid_output(self):
"""Munge task_output to be invalid json"""
self.task_output = 'HI MY NAME IS INVALID JSON'
# This should be given the value of 'unknown' if the task output
# can't be properly parsed
self.duration_sec = 'unknown'
def to_dict(self):
""" Convert fake task to dictionary representation. """
attr_dict = {key: getattr(self, key) for key in self.FEATURES}
attr_dict['created'] = attr_dict['created'].isoformat()
return attr_dict
def setUp(self):
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course=self.course.location)
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
self.problem_urlname = 'robot-some-problem-urlname'
self.module = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=_msk_from_problem_urlname(
self.course.id,
self.problem_urlname
),
state=json.dumps({'attempts': 10}),
)
mock_factory = MockCompletionInfo()
self.tasks = [self.FakeTask(mock_factory.mock_get_task_completion_info) for _ in xrange(7)]
self.tasks[-1].make_invalid_output()
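# The last fake task deliberately carries unparseable JSON output, so each
# task-list endpoint below is also exercised against a task whose
# duration_sec is the fallback value 'unknown'.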
def tearDown(self):
"""
Undo all patches.
"""
patch.stopall()
@patch.object(instructor_task.api, 'get_running_instructor_tasks')
def test_list_instructor_tasks_running(self, act):
""" Test list of all running tasks. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id})
mock_factory = MockCompletionInfo()
with patch('instructor.views.api.get_task_completion_info') as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@patch.object(instructor_task.api, 'get_instructor_task_history')
def test_list_background_email_tasks(self, act):
"""Test list of background email tasks."""
act.return_value = self.tasks
url = reverse('list_background_email_tasks', kwargs={'course_id': self.course.id})
mock_factory = MockCompletionInfo()
with patch('instructor.views.api.get_task_completion_info') as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@patch.object(instructor_task.api, 'get_instructor_task_history')
def test_list_instructor_tasks_problem(self, act):
""" Test list task history for problem. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id})
mock_factory = MockCompletionInfo()
with patch('instructor.views.api.get_task_completion_info') as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.get(url, {
'problem_urlname': self.problem_urlname,
})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@patch.object(instructor_task.api, 'get_instructor_task_history')
def test_list_instructor_tasks_problem_student(self, act):
""" Test list task history for problem AND student. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id})
mock_factory = MockCompletionInfo()
with patch('instructor.views.api.get_task_completion_info') as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.get(url, {
'problem_urlname': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@override_settings(ANALYTICS_SERVER_URL="http://robotanalyticsserver.netbot:900/")
@override_settings(ANALYTICS_API_KEY="robot_api_key")
class TestInstructorAPIAnalyticsProxy(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test instructor analytics proxy endpoint.
"""
class FakeProxyResponse(object):
""" Fake successful requests response object. """
def __init__(self):
self.status_code = requests.status_codes.codes.OK
self.content = '{"test_content": "robot test content"}'
class FakeBadProxyResponse(object):
""" Fake strange-failed requests response object. """
def __init__(self):
self.status_code = 'notok.'
self.content = '{"test_content": "robot test content"}'
def setUp(self):
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course=self.course.location)
self.client.login(username=self.instructor.username, password='test')
@patch.object(instructor.views.api.requests, 'get')
def test_analytics_proxy_url(self, act):
""" Test legacy analytics proxy url generation. """
act.return_value = self.FakeProxyResponse()
url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'aname': 'ProblemGradeDistribution'
})
print response.content
self.assertEqual(response.status_code, 200)
# check request url
expected_url = "{url}get?aname={aname}&course_id={course_id}&apikey={api_key}".format(
url="http://robotanalyticsserver.netbot:900/",
aname="ProblemGradeDistribution",
course_id=self.course.id,
api_key="robot_api_key",
)
act.assert_called_once_with(expected_url)
@patch.object(instructor.views.api.requests, 'get')
def test_analytics_proxy(self, act):
"""
Test legacy analytics content proxying.
"""
act.return_value = self.FakeProxyResponse()
url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'aname': 'ProblemGradeDistribution'
})
print response.content
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_res = {'test_content': "robot test content"}
self.assertEqual(json.loads(response.content), expected_res)
@patch.object(instructor.views.api.requests, 'get')
def test_analytics_proxy_reqfailed(self, act):
""" Test proxy when server reponds with failure. """
act.return_value = self.FakeBadProxyResponse()
url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'aname': 'ProblemGradeDistribution'
})
print response.content
self.assertEqual(response.status_code, 500)
@patch.object(instructor.views.api.requests, 'get')
def test_analytics_proxy_missing_param(self, act):
""" Test proxy when missing the aname query parameter. """
act.return_value = self.FakeProxyResponse()
url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id})
response = self.client.get(url, {})
print response.content
self.assertEqual(response.status_code, 400)
self.assertFalse(act.called)
class TestInstructorAPIHelpers(TestCase):
""" Test helpers for instructor.api """
def test_split_input_list(self):
strings = []
lists = []
strings.append("Lorem@ipsum.dolor, sit@amet.consectetur\nadipiscing@elit.Aenean\r convallis@at.lacus\r, ut@lacinia.Sed")
lists.append(['Lorem@ipsum.dolor', 'sit@amet.consectetur', 'adipiscing@elit.Aenean', 'convallis@at.lacus', 'ut@lacinia.Sed'])
for (stng, lst) in zip(strings, lists):
self.assertEqual(_split_input_list(stng), lst)
def test_split_input_list_unicode(self):
self.assertEqual(_split_input_list('robot@robot.edu, robot2@robot.edu'), ['robot@robot.edu', 'robot2@robot.edu'])
self.assertEqual(_split_input_list(u'robot@robot.edu, robot2@robot.edu'), ['robot@robot.edu', 'robot2@robot.edu'])
self.assertEqual(_split_input_list(u'robot@robot.edu, robot2@robot.edu'), [u'robot@robot.edu', 'robot2@robot.edu'])
scary_unistuff = unichr(40960) + u'abcd' + unichr(1972)
self.assertEqual(_split_input_list(scary_unistuff), [scary_unistuff])
def test_msk_from_problem_urlname(self):
course_id = 'RobotU/Robots101/3001_Spring'
capa_urlname = 'capa_urlname'
capa_urlname_xml = 'capa_urlname.xml'
xblock_urlname = 'notaproblem/someothername'
xblock_urlname_xml = 'notaproblem/someothername.xml'
capa_msk = 'i4x://RobotU/Robots101/problem/capa_urlname'
xblock_msk = 'i4x://RobotU/Robots101/notaproblem/someothername'
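# Expected mapping (see the asserts below): a bare urlname is treated as a
# capa problem and gets the 'problem' category, while an explicit
# 'category/name' pair keeps its category; '.xml' suffixes are stripped in
# both cases.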
for urlname in [capa_urlname, capa_urlname_xml]:
self.assertEqual(
_msk_from_problem_urlname(course_id, urlname),
capa_msk
)
for urlname in [xblock_urlname, xblock_urlname_xml]:
self.assertEqual(
_msk_from_problem_urlname(course_id, urlname),
xblock_msk
)
@raises(ValueError)
def test_msk_from_problem_urlname_error(self):
args = ('notagoodcourse', 'L2Node1')
_msk_from_problem_urlname(*args)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestDueDateExtensions(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test the due date extension endpoints.
"""
def setUp(self):
"""
Fixtures.
"""
due = datetime.datetime(2010, 5, 12, 2, 42, tzinfo=utc)
course = CourseFactory.create()
week1 = ItemFactory.create(due=due)
week2 = ItemFactory.create(due=due)
week3 = ItemFactory.create(due=due)
course.children = [week1.location.url(), week2.location.url(),
week3.location.url()]
homework = ItemFactory.create(
parent_location=week1.location,
due=due
)
week1.children = [homework.location.url()]
user1 = UserFactory.create()
StudentModule(
state='{}',
student_id=user1.id,
course_id=course.id,
module_state_key=week1.location.url()).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=course.id,
module_state_key=week2.location.url()).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=course.id,
module_state_key=week3.location.url()).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=course.id,
module_state_key=homework.location.url()).save()
user2 = UserFactory.create()
StudentModule(
state='{}',
student_id=user2.id,
course_id=course.id,
module_state_key=week1.location.url()).save()
StudentModule(
state='{}',
student_id=user2.id,
course_id=course.id,
module_state_key=homework.location.url()).save()
user3 = UserFactory.create()
StudentModule(
state='{}',
student_id=user3.id,
course_id=course.id,
module_state_key=week1.location.url()).save()
StudentModule(
state='{}',
student_id=user3.id,
course_id=course.id,
module_state_key=homework.location.url()).save()
self.course = course
self.week1 = week1
self.homework = homework
self.week2 = week2
self.user1 = user1
self.user2 = user2
self.instructor = InstructorFactory(course=course.location)
self.client.login(username=self.instructor.username, password='test')
def test_change_due_date(self):
url = reverse('change_due_date', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week1.location.url(),
'due_datetime': '12/30/2013 00:00'
})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(datetime.datetime(2013, 12, 30, 0, 0, tzinfo=utc),
get_extended_due(self.course, self.week1, self.user1))
def test_reset_date(self):
self.test_change_due_date()
url = reverse('reset_due_date', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week1.location.url(),
})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(None,
get_extended_due(self.course, self.week1, self.user1))
def test_show_unit_extensions(self):
self.test_change_due_date()
url = reverse('show_unit_extensions',
kwargs={'course_id': self.course.id})
response = self.client.get(url, {'url': self.week1.location.url()})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(json.loads(response.content), {
u'data': [{u'Extended Due Date': u'2013-12-30 00:00',
u'Full Name': self.user1.profile.name,
u'Username': self.user1.username}],
u'header': [u'Username', u'Full Name', u'Extended Due Date'],
u'title': u'Users with due date extensions for %s' %
self.week1.display_name})
def test_show_student_extensions(self):
self.test_change_due_date()
url = reverse('show_student_extensions',
kwargs={'course_id': self.course.id})
response = self.client.get(url, {'student': self.user1.username})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(json.loads(response.content), {
u'data': [{u'Extended Due Date': u'2013-12-30 00:00',
u'Unit': self.week1.display_name}],
u'header': [u'Unit', u'Extended Due Date'],
u'title': u'Due date extensions for %s (%s)' % (
self.user1.profile.name, self.user1.username)})
|
echanna/EdxNotAFork
|
lms/djangoapps/instructor/tests/test_api.py
|
Python
|
agpl-3.0
| 88,197
|
[
"VisIt"
] |
8cf72e8cfb7d33ca637322eb8e5284f5897a2fdcb6900c605471c8b6415421bc
|
########################################################################
# $Id$
# File : ProcessMonitor.py
# Author : Stuart Paterson
########################################################################
""" The Process Monitor utility allows to calculate cumulative CPU time and memory
for a given PID and it's process group. This is only implemented for linux /proc
file systems but could feasibly be extended in the future.
"""
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.Subprocess import shellCall
__RCSID__ = "$Id$"
import os, re, platform
class ProcessMonitor:
#############################################################################
def __init__( self ):
""" Standard constructor
"""
self.log = gLogger.getSubLogger( 'ProcessMonitor' )
self.osType = platform.uname()
#############################################################################
def getCPUConsumed( self, pid ):
"""Returns the CPU consumed for supported platforms when supplied a PID.
"""
currentOS = self.__checkCurrentOS()
if currentOS.lower() == 'linux':
return self.getCPUConsumedLinux( pid )
else:
self.log.warn( 'Platform %s is not supported' % ( currentOS ) )
return S_ERROR( 'Unsupported platform' )
def getMemoryConsumed( self, pid ):
"""Returns the CPU consumed for supported platforms when supplied a PID.
"""
currentOS = self.__checkCurrentOS()
if currentOS.lower() == 'linux':
return self.getMemoryConsumedLinux( pid )
else:
self.log.warn( 'Platform %s is not supported' % ( currentOS ) )
return S_ERROR( 'Unsupported platform' )
def getResourceConsumedLinux( self, pid ):
"""Returns the CPU consumed given a PID assuming a proc file system exists.
"""
pid = str( pid )
masterProcPath = '/proc/%s/stat' % ( pid )
if not os.path.exists( masterProcPath ):
return S_ERROR( 'Process %s does not exist' % ( pid ) )
#Get the current process list
pidListResult = self.__getProcListLinux()
if not pidListResult['OK']:
return pidListResult
pidList = pidListResult['Value']
return self.__getChildResourceConsumedLinux( pid, pidList )
#############################################################################
def getCPUConsumedLinux( self, pid ):
"""Returns the CPU consumed given a PID assuming a proc file system exists.
"""
result = self.getResourceConsumedLinux( pid )
if not result['OK']:
return result
currentCPU = result['Value']['CPU']
self.log.verbose( 'Final CPU estimate is %s' % currentCPU )
return S_OK( currentCPU )
def getMemoryConsumedLinux( self, pid ):
""" Get the current memory consumption
"""
result = self.getResourceConsumedLinux( pid )
if not result['OK']:
return result
vsize = result['Value']['Vsize']
rss = result['Value']['RSS']
self.log.verbose( 'Current memory estimate is Vsize: %s, RSS: %s' % ( vsize, rss ) )
return S_OK( {'Vsize': vsize, 'RSS': rss } )
#############################################################################
def __getProcListLinux( self ):
"""Gets list of process IDs from /proc/*.
"""
result = shellCall( 10, 'ls -d /proc/[0-9]*' )
if not result['OK']:
if not 'Value' in result:
return result
procList = result['Value'][1].replace( '/proc/', '' ).split( '\n' )
return S_OK( procList )
#############################################################################
def __getChildResourceConsumedLinux( self, pid, pidList, infoDict = None ):
"""Adds contributions to CPU total from child processes recursively.
"""
childCPU = 0
vsize = 0
rss = 0
pageSize = os.sysconf('SC_PAGESIZE')
if not infoDict:
infoDict = {}
for pidCheck in pidList:
info = self.__getProcInfoLinux( pidCheck )
if info['OK']:
infoDict[pidCheck] = info['Value']
procGroup = self.__getProcGroupLinux( pid )
if not procGroup['OK']:
return procGroup
procGroup = procGroup['Value'].strip()
for pidCheck, info in infoDict.items():
if pidCheck in infoDict and info[3] == pid:
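        # info[13..16] are utime, stime, cutime, cstime (fields 14-17 of
        # /proc/<pid>/stat), measured in clock ticks; dividing by 100 assumes
        # sysconf(_SC_CLK_TCK) == 100, the usual Linux default.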
contribution = float( info[13] ) / 100 + float( info[14] ) / 100 + float( info[15] ) / 100 + float( info[16] ) / 100
childCPU += contribution
vsize += float( info[22] )
rss += float( info[23] ) * pageSize
self.log.debug( 'Added %s to CPU total (now %s) from child PID %s %s' % ( contribution, childCPU, info[0], info[1] ) )
del infoDict[pidCheck]
result = self.__getChildResourceConsumedLinux( pidCheck, pidList, infoDict )
if result['OK']:
childCPU += result['Value']['CPU']
vsize += result['Value']['Vsize']
rss += result['Value']['RSS']
#Next add any contributions from orphan processes in same process group
for pidCheck, info in infoDict.items():
if pidCheck in infoDict and info[3] == 1 and info[4] == procGroup:
contribution = float( info[13] ) / 100 + float( info[14] ) / 100 + float( info[15] ) / 100 + float( info[16] ) / 100
childCPU += contribution
vsize += float( info[22] )
rss += float( info[23] ) * pageSize
self.log.debug( 'Added %s to CPU total (now %s) from orphan PID %s %s' % ( contribution, childCPU, info[0], info[1] ) )
del infoDict[pidCheck]
#Finally add the parent itself
if pid in infoDict:
info = infoDict[pid]
contribution = float( info[13] ) / 100 + float( info[14] ) / 100 + float( info[15] ) / 100 + float( info[16] ) / 100
childCPU += contribution
vsize += float( info[22] )
rss += float( info[23] ) * pageSize
self.log.debug( 'Added %s to CPU total (now %s) from PID %s %s' % ( contribution, childCPU, info[0], info[1] ) )
del infoDict[pid]
# Some debug printout if 0 CPU is determined
if childCPU == 0:
self.log.error( 'Consumed CPU is found to be 0. Contributing processes:' )
for pidCheck in pidList:
if pidCheck not in infoDict:
info = self.__getProcInfoLinux( pidCheck )
if info['OK']:
self.log.error( ' PID:', info['Value'] )
return S_OK( { "CPU": childCPU,
"Vsize": vsize,
"RSS": rss } )
#############################################################################
def __getProcInfoLinux( self, pid ):
"""Attempts to read /proc/PID/stat and returns list of items if ok.
/proc/[pid]/stat
Status information about the process. This is used by ps(1).
It is defined in /usr/src/linux/fs/proc/array.c.
The fields, in order, with their proper scanf(3) format
specifiers, are:
pid %d (1) The process ID.
comm %s (2) The filename of the executable, in
parentheses. This is visible whether or not the
executable is swapped out.
state %c (3) One character from the string "RSDZTW" where R
is running, S is sleeping in an interruptible
wait, D is waiting in uninterruptible disk sleep,
Z is zombie, T is traced or stopped (on a signal),
and W is paging.
ppid %d (4) The PID of the parent.
pgrp %d (5) The process group ID of the process.
session %d (6) The session ID of the process.
tty_nr %d (7) The controlling terminal of the process. (The
minor device number is contained in the
combination of bits 31 to 20 and 7 to 0; the major
device number is in bits 15 to 8.)
tpgid %d (8) The ID of the foreground process group of the
controlling terminal of the process.
flags %u (%lu before Linux 2.6.22)
(9) The kernel flags word of the process. For bit
meanings, see the PF_* defines in the Linux kernel
source file include/linux/sched.h. Details depend
on the kernel version.
minflt %lu (10) The number of minor faults the process has
made which have not required loading a memory page
from disk.
cminflt %lu (11) The number of minor faults that the process's
waited-for children have made.
majflt %lu (12) The number of major faults the process has
made which have required loading a memory page
from disk.
cmajflt %lu (13) The number of major faults that the process's
waited-for children have made.
utime %lu (14) Amount of time that this process has been
scheduled in user mode, measured in clock ticks
(divide by sysconf(_SC_CLK_TCK)). This includes
guest time, guest_time (time spent running a
virtual CPU, see below), so that applications that
are not aware of the guest time field do not lose
that time from their calculations.
stime %lu (15) Amount of time that this process has been
scheduled in kernel mode, measured in clock ticks
(divide by sysconf(_SC_CLK_TCK)).
cutime %ld (16) Amount of time that this process's waited-for
children have been scheduled in user mode,
measured in clock ticks (divide by
sysconf(_SC_CLK_TCK)). (See also times(2).) This
includes guest time, cguest_time (time spent
running a virtual CPU, see below).
cstime %ld (17) Amount of time that this process's waited-for
children have been scheduled in kernel mode,
measured in clock ticks (divide by
sysconf(_SC_CLK_TCK)).
priority %ld
(18) (Explanation for Linux 2.6) For processes
running a real-time scheduling policy (policy
below; see sched_setscheduler(2)), this is the
negated scheduling priority, minus one; that is, a
number in the range -2 to -100, corresponding to
real-time priorities 1 to 99. For processes
running under a non-real-time scheduling policy,
this is the raw nice value (setpriority(2)) as
represented in the kernel. The kernel stores nice
values as numbers in the range 0 (high) to 39
(low), corresponding to the user-visible nice
range of -20 to 19.
Before Linux 2.6, this was a scaled value based on
the scheduler weighting given to this process.
nice %ld (19) The nice value (see setpriority(2)), a value
in the range 19 (low priority) to -20 (high
priority).
num_threads %ld
(20) Number of threads in this process (since
Linux 2.6). Before kernel 2.6, this field was
hard coded to 0 as a placeholder for an earlier
removed field.
itrealvalue %ld
(21) The time in jiffies before the next SIGALRM
is sent to the process due to an interval timer.
Since kernel 2.6.17, this field is no longer
maintained, and is hard coded as 0.
starttime %llu (was %lu before Linux 2.6)
(22) The time the process started after system
boot. In kernels before Linux 2.6, this value was
expressed in jiffies. Since Linux 2.6, the value
is expressed in clock ticks (divide by
sysconf(_SC_CLK_TCK)).
vsize %lu (23) Virtual memory size in bytes.
rss %ld (24) Resident Set Size: number of pages the
process has in real memory. This is just the
pages which count toward text, data, or stack
space. This does not include pages which have not
been demand-loaded in, or which are swapped out.
rsslim %lu (25) Current soft limit in bytes on the rss of the
process; see the description of RLIMIT_RSS in
getrlimit(2).
startcode %lu
(26) The address above which program text can run.
endcode %lu (27) The address below which program text can run.
startstack %lu
(28) The address of the start (i.e., bottom) of
the stack.
kstkesp %lu (29) The current value of ESP (stack pointer), as
found in the kernel stack page for the process.
kstkeip %lu (30) The current EIP (instruction pointer).
signal %lu (31) The bitmap of pending signals, displayed as a
decimal number. Obsolete, because it does not
provide information on real-time signals; use
/proc/[pid]/status instead.
blocked %lu (32) The bitmap of blocked signals, displayed as a
decimal number. Obsolete, because it does not
provide information on real-time signals; use
/proc/[pid]/status instead.
sigignore %lu
(33) The bitmap of ignored signals, displayed as a
decimal number. Obsolete, because it does not
provide information on real-time signals; use
/proc/[pid]/status instead.
sigcatch %lu
(34) The bitmap of caught signals, displayed as a
decimal number. Obsolete, because it does not
provide information on real-time signals; use
/proc/[pid]/status instead.
wchan %lu (35) This is the "channel" in which the process is
waiting. It is the address of a system call, and
can be looked up in a namelist if you need a
textual name. (If you have an up-to-date
/etc/psdatabase, then try ps -l to see the WCHAN
field in action.)
nswap %lu (36) Number of pages swapped (not maintained).
cnswap %lu (37) Cumulative nswap for child processes (not
maintained).
exit_signal %d (since Linux 2.1.22)
(38) Signal to be sent to parent when we die.
processor %d (since Linux 2.2.8)
(39) CPU number last executed on.
rt_priority %u (since Linux 2.5.19; was %lu before Linux
2.6.22)
(40) Real-time scheduling priority, a number in
the range 1 to 99 for processes scheduled under a
real-time policy, or 0, for non-real-time
processes (see sched_setscheduler(2)).
policy %u (since Linux 2.5.19; was %lu before Linux 2.6.22)
(41) Scheduling policy (see
sched_setscheduler(2)). Decode using the SCHED_*
constants in linux/sched.h.
delayacct_blkio_ticks %llu (since Linux 2.6.18)
(42) Aggregated block I/O delays, measured in
clock ticks (centiseconds).
guest_time %lu (since Linux 2.6.24)
(43) Guest time of the process (time spent running
a virtual CPU for a guest operating system),
measured in clock ticks (divide by
sysconf(_SC_CLK_TCK)).
cguest_time %ld (since Linux 2.6.24)
(44) Guest time of the process's children,
measured in clock ticks (divide by
sysconf(_SC_CLK_TCK)).
"""
procPath = '/proc/%s/stat' % ( pid )
try:
fopen = open( procPath, 'r' )
procStat = fopen.readline()
fopen.close()
except Exception:
return S_ERROR( 'Not able to check %s' % pid )
return S_OK( procStat.split( ' ' ) )
#############################################################################
def __getProcGroupLinux( self, pid ):
"""Returns UID for given PID.
"""
result = shellCall( 10, 'ps --no-headers -o pgrp -p %s' % ( pid ) )
if not result['OK']:
if not 'Value' in result:
return result
return S_OK( result['Value'][1] )
#############################################################################
def __checkCurrentOS( self ):
"""Checks it is possible to determine CPU consumed with this utility
for the current OS.
"""
localOS = None
self.osType = platform.uname()
if re.search( 'Darwin', self.osType[0] ):
localOS = 'Mac'
elif re.search( 'Windows', self.osType[0] ):
localOS = 'Windows'
else:
localOS = 'Linux'
self.log.debug( 'Will determine CPU consumed for %s flavour OS' % ( localOS ) )
return localOS
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
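# Illustrative usage sketch (an addition, not part of the original DIRAC module):
# exercise the monitor on the current process, assuming DIRAC is configured.
if __name__ == '__main__':
  monitor = ProcessMonitor()
  cpuRes = monitor.getCPUConsumed( os.getpid() )
  memRes = monitor.getMemoryConsumed( os.getpid() )
  if cpuRes['OK'] and memRes['OK']:
    print 'CPU: %s s, Vsize: %s, RSS: %s bytes' % ( cpuRes['Value'], memRes['Value']['Vsize'], memRes['Value']['RSS'] )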
|
calancha/DIRAC
|
Core/Utilities/ProcessMonitor.py
|
Python
|
gpl-3.0
| 18,404
|
[
"DIRAC"
] |
9fe7ff32957a1dc756fa1e6b7f104682be4b1518705066620865154a73f68efa
|
#!/usr/bin/env python3
import os
from functions import auto_snapshot
from functions import enable_compression
from functions import validation
#
# Interactive ZFS on Linux setup
#
#
# Print Description
#
os.system("clear")
print("This script will provide a guided setup for ZFS on Linux. Feel free to modify and distribute.")
print("To contribute, visit 'https://github.com/jsirianni/zfs-auto-deploy' or email me at Joseph.Sirianni88@gmail.com")
#
# Get zpool name
#
zpool_name = str(input("\nInput zpool name: "))
#
# Get raid_type
#
raid_type = -1
while raid_type < 0 or raid_type > 5:
print("\nSpecify RAID type to be used")
print("0 = raid0 = minimum of two drives")
print("1 = raid1 = minimum of two drives")
print("2 = raid10 = minimum of four drives")
print("3 = raidz1 = minimum of three drives")
print("4 = raidz2 = minimum of four drives")
print("5 = raidz3 = minimum of five drives")
raid_type = int(input("\nInput RAID type: "))
#
# Determine ZFS RAID type
#
if raid_type == 0:
    selected_raid_type = ""  # plain stripe: zpool takes the bare drive list, no vdev keyword
elif raid_type == 1:
    selected_raid_type = "mirror"
elif raid_type == 2:
    selected_raid_type = "mirror"  # raid10: striped two-way mirrors, assembled at pool creation below
elif raid_type == 3:
    selected_raid_type = "raidz1"
elif raid_type == 4:
    selected_raid_type = "raidz2"
elif raid_type == 5:
    selected_raid_type = "raidz3"
#
# Get drive selection
#
drive_set_1 = ""
number_of_drives = 0
print("\nConfigure hard drives to use for ZFS")
print("Enter each drive one by one with the format shown. Example: /dev/sdb")
print("Enter 'done' when done selecting drives")
print("Enter 'list' if you need a list of drives")
while True:
# Input a drive. Trim whitespace
drive = str(input("\nEnter drive: "))
drive = drive.strip()
# Check for blank input
if drive == "":
print("You cannot enter a blank drive. Input 'done' to end drive selection")
# Print list of drives if user enters "list"
if drive == "list":
os.system("sudo lsblk")
continue
# If user enters a drive, add it to the set
if drive != "done":
drive_set_1 += (drive + " ")
number_of_drives += 1
continue
# If user enters done, validate drive number requirement
elif drive == "done":
if raid_type == 0 and number_of_drives < 2:
print("\nRAID0 requires at least two drives.")
continue
elif raid_type == 1 and number_of_drives < 2:
print("\nRAID1 requires at least two drives.")
continue
elif raid_type == 3 and number_of_drives < 3:
print("\nRAIDZ1 requires at least three drives.")
continue
elif raid_type == 4 and number_of_drives < 4:
print("\nRAIDZ2 requires at least four drives.")
continue
elif raid_type == 5 and number_of_drives < 5:
print("\nRAIDZ3 requires at least five drives.")
continue
elif raid_type == 2 and number_of_drives < 4:
print("\nRAID10 requires at least four drives.")
continue
# If raid10, check if even number of drives
elif raid_type == 2:
d = (number_of_drives // 2)
d = (d * 2)
if d != number_of_drives:
print("\nRAID10 requires an even amount of drives")
continue
else:
break
# If all validation passes, break loop.
else:
break
#
# Clear screen, get feature selection
#
os.system("clear")
if input("\nSetup datasets? Y/N: ") == "y":
create_datasets = True
else:
create_datasets = False
if input("Setup auto snapshots? Y/N: ") == "y":
enable_auto_snapshots = True
else:
enable_auto_snapshots = False
if input("Setup Compression? Y/N: ") == "y":
enable_zfs_compression = True
else:
enable_zfs_compression = False
if input("Setup Gmail Email Alerts? Y/N: ") == "y":
gmail_alerts = True
else:
gmail_alerts = False
#
# Print ZFS deployment summary.
#
os.system("clear")
print("ZFS Deployment Configuration Summary\n")
print("Zpool name: " + zpool_name)
print("Number of drives: " + str(number_of_drives))
print("Drives to use: " + drive_set_1)
print("RAID Type: " + selected_raid_type)
if create_datasets == True:
print("Datasets will be created interactively")
if enable_auto_snapshots == True:
print("ZFS Auto Snapshots will be configured interactively")
if enable_zfs_compression == True:
print("ZFS compression will be configured interactively")
if gmail_alerts == True:
print("Gmail Email Alerts will be configured interactively")
#
# Get confirmation to install
#
if input("\n\nIs the above configuration correct? Y/N: ") == "y":
#
# Update repos and install zfsutils-linux
#
os.system("sudo apt-get update")
os.system("sudo apt-get install -y zfsutils-linux unzip")
#
# Create zpool
#
os.system("sudo zpool create -f " + zpool_name + " " + selected_raid_type + " " + drive_set_1)
os.system("clear")
#
# Create datasets and mount them
#
datasets = []
while create_datasets == True:
dataset = str(input("Enter a dataset name for zpool " + zpool_name + ": "))
datasets.append(dataset)
mount_dir = str(input("Enter mount point for " + zpool_name + "/" + dataset + ": "))
os.system("sudo mkdir " + mount_dir)
os.system("sudo zfs create -o mountpoint=" + mount_dir + " " + zpool_name + "/" + dataset)
if input("\nCreate another dataset? Y/N: ") != "y":
os.system("clear")
break
#
# Configure zfs snapshots
#
if enable_auto_snapshots == True:
#
# Call install function
#
auto_snapshot.install()
os.system("clear")
#
# Setup zpool global snapshots (all datasets)
#
if input("Setup zpool (global) snapshots? Y/N: ") == "y":
auto_snapshot.enable(zpool_name)
else:
auto_snapshot.disable(zpool_name)
#
# Setup dataset level snapshots
#
if input("Setup snapshots for each dataset? Y/N: ") == "y":
# Iterate through dataset list and setup snapshots
for i in datasets:
i = (zpool_name + "/" + i)
if input("Setup snapshots for " + i + " dataset? Y/N: ") == "y":
auto_snapshot.enable(i)
else:
auto_snapshot.disable(i)
#
# Configure ZFS Compression. Compression is off by default.
#
os.system("clear")
if enable_zfs_compression == True:
if input("Enable compression on entire zpool, and all datasets? Y/N: ") == "y":
enable_compression.enable(zpool_name)
else:
enable_compression.disable(zpool_name)
if input("Enable compression per dataset? Y/N: ") == "y":
for i in datasets:
n = (zpool_name + "/" + i)
if input("Enable compression for " + i + " dataset? Y/N: ") == "y":
enable_compression.enable(n)
else:
enable_compression.disable(n)
#
# Execute email alerts interactive script
#
os.system("clear")
if gmail_alerts == True:
os.system("sudo sh gmail-alerts.sh")
#
# End Program
#
os.system("clear")
print("zfs-auto-desploy has finished. Please report any bugs!")
#
# User did not commit to configuration, abort
#
else:
os.system("clear")
print("User aborted the setup")
|
jsirianni/zfs-auto-deploy
|
install.py
|
Python
|
gpl-3.0
| 7,650
|
[
"VisIt"
] |
55c77cd218e0d5a41cce9d6e3d2896d6053892d84e6c6ca6c9c81618b42f36f0
|
from optparse import OptionParser
import os
import time
import sys
from Bio.Blast import NCBIXML
formats = ['fastq', 'fasta', 'fa', 'fas']
class AlignmentBase:
""" Data structure to store alignment object. """
def __init__(self, subject_base, query_base, position_subject, position_query, position_query_local, query_name, subject_name, score, strand, query_geneName_local):
self.s = subject_base
self.q = query_base
self.position_subject = position_subject
self.position_query = position_query
self.position_query_local = position_query_local
self.exons_list = None
self.query_name = query_name
self.subject_name = subject_name
self.score = score
self.strand = strand
self.query_geneName_local = query_geneName_local
def set_exons(self, exons_list):
self.exons_list = exons_list
def save_exoninfo_in_gff(out_gff3_hlr, *args):
out_gff3_hlr.write("\t".join(args) + "\n")
def extract_and_write_gff(alignments_objects_list, out_gff3_global_hlr, out_gff3_local_hlr):
gff3_source = "."
gff3_frame = "."
gff3_feature = "blast_hit_model"
exons_dic = {} # exon_name: [position_query, position_query_local, last_exon_info]
    for al_obj_in_hsp in alignments_objects_list:
        # skip alignment positions with no annotated exon (exons_list left as None)
        if al_obj_in_hsp.exons_list is None:
            continue
        for exon_info in al_obj_in_hsp.exons_list:
exon_name = exon_info[0]
exon_last_info = exon_info[2]
if exon_name not in exons_dic:
exons_dic[exon_name] = []
exons_dic[exon_name].append(
(al_obj_in_hsp.position_query, al_obj_in_hsp.position_query_local, exon_last_info))
# gff
    for exon, gff_exon_info_list in exons_dic.iteritems():
        # the 'Last=...' flag is identical for every hit of a given exon
        exon_last_info = gff_exon_info_list[0][2]
        gff3_attributes = "GeneName=" + al_obj_in_hsp.subject_name + ";ExonName=" + exon + ";" + exon_last_info
# global
set_position_query = set(x[0] for x in gff_exon_info_list)
save_exoninfo_in_gff(out_gff3_global_hlr,
al_obj_in_hsp.query_name,
gff3_source,
gff3_feature,
str(min(set_position_query)),
str(max(set_position_query)),
str(al_obj_in_hsp.score),
al_obj_in_hsp.strand,
gff3_frame,
                             gff3_attributes)
# local
set_position_query_local = set(x[1] for x in gff_exon_info_list)
save_exoninfo_in_gff(out_gff3_local_hlr,
al_obj_in_hsp.query_geneName_local,
gff3_source,
gff3_feature,
str(min(set_position_query_local)),
str(max(set_position_query_local)),
str(al_obj_in_hsp.score),
al_obj_in_hsp.strand,
gff3_frame,
                             gff3_attributes)
""" Output data model
001 . blast_hit_model 444 444 0.0 + . GeneName=ENSXETG00000026876;ExonName=ENSXETE00000408762;Last=False
001 . blast_hit_model 445 491 0.0 + . GeneName=ENSXETG00000026876;ExonName=ENSXETE00000385819;Last=False
001 . blast_hit_model 492 607 0.0 + . GeneName=ENSXETG00000026876;ExonName=ENSXETE00000377032;Last=False
001 . blast_hit_model 608 714 0.0 + . GeneName=ENSXETG00000026876;ExonName=ENSXETE00000373100;Last=False
001 . blast_hit_model 715 834 0.0 + . GeneName=ENSXETG00000026876;ExonName=ENSXETE00000423072;Last=False
001 . blast_hit_model 835 930 0.0 + . GeneName=ENSXETG00000026876;ExonName=ENSXETE00000377711;Last=False
001__q[651:4048]_s[444:3826]_ENSXETG00000026876 . blast_hit_model 1 1 0.0 + . GeneName=ENSXETG00000026876;ExonName=ENSXETE00000408762;Last=False
001__q[651:4048]_s[444:3826]_ENSXETG00000026876 . blast_hit_model 2 48 0.0 + . GeneName=ENSXETG00000026876;ExonName=ENSXETE00000385819;Last=False
001__q[651:4048]_s[444:3826]_ENSXETG00000026876 . blast_hit_model 49 164 0.0 + . GeneName=ENSXETG00000026876;ExonName=ENSXETE00000377032;Last=False
001__q[651:4048]_s[444:3826]_ENSXETG00000026876 . blast_hit_model 165 271 0.0 + . GeneName=ENSXETG00000026876;ExonName=ENSXETE00000373100;Last=False
001__q[651:4048]_s[444:3826]_ENSXETG00000026876 . blast_hit_model 272 391 0.0 + . GeneName=ENSXETG00000026876;ExonName=ENSXETE00000423072;Last=False
001__q[651:4048]_s[444:3826]_ENSXETG00000026876 . blast_hit_model 392 487 0.0 + . GeneName=ENSXETG00000026876;ExonName=ENSXETE00000377711;Last=False
001__q[651:4048]_s[444:3826]_ENSXETG00000026876 . blast_hit_model 488 615 0.0 + . GeneName=ENSXETG00000026876;ExonName=ENSXETE00000409016;Last=False
001__q[651:4048]_s[444:3826]_ENSXETG00000026876 . blast_hit_model 616 709 0.0 + . GeneName=ENSXETG00000026876;ExonName=ENSXETE00000414542;Last=False
gff3 model:
ENSXETG00000008118 . gene_model_exons 1 33 . -1 . Name=ENSXETE00000362374;Last=False
ENSXETG00000008118 . gene_model_exons 34 135 . -1 . Name=ENSXETE00000100066;Last=False
seqname - The name of the sequence. Typically a chromosome or a contig. Argo does not care what you put here. It will superimpose gff features on any sequence you like.
source - The program that generated this feature. Argo displays the value of this field in the inspector but does not do anything special with it.
feature - The name of this type of feature. The official GFF3 spec states that this should be a term from the SOFA ontology, but Argo does not do anything with this value except display it.
start - The starting position of the feature in the sequence. The first base is numbered 1.
end - The ending position of the feature (inclusive).
score - A score between 0 and 1000. If there is no score value, enter ".".
strand - Valid entries include '+', '-', or '.' (for don't know/don't care).
frame - If the feature is a coding exon, frame should be a number between 0-2 that represents the reading frame of the first base. If the feature is not a coding exon, the value should be '.'. Argo does not do anything with this field except display its value.
GFF3 grouping attributes: attribute keys and values are separated by '=' signs. Values must be URL encoded/quoted. Attribute pairs are separated by semicolons. Certain special attributes are used for grouping and identification (see below). This field is the one important difference between GFF flavors.
"""
def get_hsp_alignment_object_list(hsp, alignment_geneName, query_geneName, query_geneName_local):
""" Generates list of hsp alignments. """
position_subject = hsp.sbjct_start
position_query = hsp.query_start # starting position for query, WITHOUT gaps
position_query_local = 1
# test for strand
gff3_strand = "+"
if hsp.sbjct_start > hsp.sbjct_end:
gff3_strand = "-"
# storage for output
alignments_objects_list = []
for index in xrange(len(str(hsp.sbjct))): # length of alignment, with gaps!
letter_subject = str(hsp.sbjct)[index]
letter_query = str(hsp.query)[index]
# case 1:
if letter_subject == "-" and letter_query == "-":
print("WARNING: gap in sbj & query, it should not be like this!")
# position_subject += 1
continue
# case 2
elif letter_subject != "-" and letter_query != "-": # this idx has exon:
al_base = AlignmentBase(letter_subject, letter_query, position_subject, position_query,
position_query_local, query_geneName, alignment_geneName, hsp.expect, gff3_strand,
query_geneName_local)
alignments_objects_list.append(al_base)
# minus strand, thus base position countdown
if gff3_strand == "-":
position_subject -= 1
else:
position_subject += 1
position_query += 1
position_query_local += 1
continue
# case 3
elif letter_subject == "-" and letter_query != "-":
al_base = AlignmentBase(letter_subject, letter_query, position_subject, position_query,
position_query_local, query_geneName, alignment_geneName, hsp.expect, gff3_strand,
query_geneName_local)
alignments_objects_list.append(al_base)
position_query += 1
position_query_local += 1
continue
# case 4
elif letter_subject != "-" and letter_query == "-":
# minus strand, thus base position countdown
if gff3_strand == "-":
position_subject -= 1
else:
position_subject += 1
continue
return alignments_objects_list
def main(args=[]):
usage = '''
    usage: %prog [options] arg \nProgram parses a BLAST XML file and translates exon boundaries (annotated on reference sequences) onto query sequences (e.g. transcripts)
'''
parser = OptionParser(usage, version='%prog version 1.0')
parser.add_option("-r", "--reference_fasta", dest="REFERENCE_FASTA", help="reference in fasta format")
parser.add_option("-g", "--gff_reference_fasta", dest="GFF_REFERENCE_FASTA", help="annotation for reference in gff3 format")
parser.add_option("-q", "--query_fasta", dest="QUERY_FASTA", help="query in fasta format")
parser.add_option("-b", "--blast_db_path_and_name", dest="BLAST_DB_PATH_AND_NAME", help="blast+ database" ''', default="blast_out.xml"''')
parser.add_option("-v", "--blast_xml_file", dest="BLAST_XML_FILE", help="blast results in xml file format", default="blast_out.xml")
parser.add_option("-f", "--output_file", dest="OUTPUT_FILE", help="output file", action="store", type="string", default=str(__name__) + ".txt")
parser.add_option("-o", "--output_folder", dest="OUTPUT_FOLDER", help="output folder", default="./")
parser.add_option("-e", "--e_value_thresh", dest="E_VALUE_THRESH", help="threshold e-value", default=1e-8)
parser.add_option("-a", "--only_best_Alignment", dest="ONLY_BEST_ALIGNMENT", help="take only 1, best q-s pair", default=True)
parser.add_option("-w", "--blast_word_size", dest="BLAST_WORD_SIZE", help="blast word_size", default=11)
parser.add_option("-n", "--blast_num_threads", dest="BLAST_NUM_THREADS", help="number of threads", default=2)
parser.add_option("-m", "--blast_match_score", dest="BLAST_MATCH_SCORE", help="reward for nt match", default=1)
parser.add_option("-s", "--blast_mismatch_score", dest="BLAST_MISMATCH_SCORE", help="penalty for nt mismatch", default=-3)
parser.add_option("-y", "--blast_gap_open", dest="BLAST_GAP_OPEN", help="cost of opening a gap", default=5)
parser.add_option("-x", "--blast_gap_extend", dest="BLAST_GAP_EXTEND", help="cost of gap extension", default=2)
(options, arg) = parser.parse_args(args)
# --- Entering program
t_st = time.time()
if not os.path.isdir(options.OUTPUT_FOLDER):
sys.stdout.write('\nWrong output directory!')
return
os.chdir(options.OUTPUT_FOLDER)
logging_file = "log_output"
if options.OUTPUT_FILE != "":
logging_file = options.OUTPUT_FILE
log_info_hlr = open(options.OUTPUT_FOLDER + os.sep + logging_file + ".log", "w")
log_info = "Entering program: {}\n".format(os.path.basename(__file__))
sys.stdout.write(log_info)
log_info_hlr.write(log_info)
log_info = "\nUsed options: {}\n".format("\n".join(str(options).split(",")))
sys.stdout.write(log_info)
log_info_hlr.write(log_info)
# --- workspace
s = os.path.join(os.path.dirname(__file__), '.')
os.chdir(s)
print os.getcwd()
# --- parsing gff3 file
log_info = "Parsing {}\n".format(options.GFF_REFERENCE_FASTA)
sys.stdout.write(log_info)
log_info_hlr.write(log_info)
gff_dic = {}
s_prev_gen = ""
gff_ref_hlr = open(options.GFF_REFERENCE_FASTA, "r")
for line_gff in gff_ref_hlr:
line_gff_list = line_gff.split("\t")
gene_name = line_gff_list[0]
exon_start = int(line_gff_list[3])
exon_end = int(line_gff_list[4])
exon_strand = line_gff_list[6]
exon_name = line_gff_list[8].split(";")[0].split("=")[1]
last_exon = line_gff_list[8].split(";")[1].strip()
info_pack = [exon_name, exon_strand, last_exon]
if gene_name not in gff_dic:
coord_exons_dic = {}
for x_coord in range(exon_start, exon_end + 1):
coord_exons_dic[x_coord] = [info_pack]
gff_dic[gene_name] = coord_exons_dic
else:
coord_exons_dic = gff_dic[gene_name]
for x_coord in range(exon_start, exon_end + 1):
if x_coord not in coord_exons_dic:
coord_exons_dic[x_coord] = []
coord_exons_dic[x_coord].append(info_pack)
if gene_name != s_prev_gen:
s_prev_gen = gene_name
log_info = "Parsing gff for gene: {}\n".format(s_prev_gen)
sys.stdout.write(log_info)
log_info_hlr.write(log_info)
# --- blast analysis for each sequence
from Bio.Blast.Applications import NcbiblastnCommandline
blast_db_source = options.BLAST_DB_PATH_AND_NAME
blastx_cline = NcbiblastnCommandline(query=options.QUERY_FASTA, db=blast_db_source,
evalue=float(options.E_VALUE_THRESH), outfmt=5, out=options.BLAST_XML_FILE,
word_size=options.BLAST_WORD_SIZE,
num_threads=options.BLAST_NUM_THREADS, reward=options.BLAST_MATCH_SCORE,
penalty=options.BLAST_MISMATCH_SCORE, gapopen=options.BLAST_GAP_OPEN,
gapextend=options.BLAST_GAP_EXTEND)
stdout, stderr = blastx_cline()
# --- analysis of blast alignment
out_file_core = "exons_alignment_by_blast_out"
out_local_gff3_hlr = open(options.OUTPUT_FOLDER + os.sep + out_file_core + "_local.gff3", "w")
out_global_gff3_hlr = open(options.OUTPUT_FOLDER + os.sep + out_file_core + "_global.gff3", "w")
out_fasta_hlr = open(options.OUTPUT_FOLDER + os.sep + out_file_core + ".fasta", "w")
result_handle = open(options.BLAST_XML_FILE)
blast_records = NCBIXML.parse(result_handle)
for blast_record in blast_records:
for alignment in blast_record.alignments:
for hsp in alignment.hsps:
if hsp.expect < float(options.E_VALUE_THRESH):
alignment_geneName = str(alignment.hit_def)
print('sequence:', alignment.title)
print("len hsp.query", len(hsp.query))
print("len hsp.sbjct", len(hsp.sbjct))
print("len hsp.match", len(hsp.match))
print("hsp.sbjct", str(hsp.sbjct))
print("hsp.match", str(hsp.match))
print("hsp.query", str(hsp.query))
# coordinates: subject
print("hsp.sbjct_start", hsp.sbjct_start)
print("hsp.sbjct_start", hsp.sbjct_end)
# coordinates: query
print("hsp.sbjct_start", hsp.query_start)
print("hsp.sbjct_start", hsp.query_end)
coord_exons_dic = gff_dic[alignment_geneName]
# generate alignment objects list
query_geneName = str(blast_record.query)
query_geneName_local = query_geneName + "__q[" + str(hsp.query_start) + ":" + str(hsp.query_end) + "]" + "_s[" + str(hsp.sbjct_start) + ":" + str(
hsp.sbjct_end) + "]" + "_" + alignment_geneName
alignment_object_list = get_hsp_alignment_object_list(hsp, alignment_geneName, query_geneName, query_geneName_local)
query_seq = "".join([xx.q for xx in alignment_object_list])
out_fasta_hlr.write(">" + query_geneName_local + "\n")
out_fasta_hlr.write(query_seq + "\n")
# set exons info into alignment objects
for al_obj_in_hsp in alignment_object_list:
if al_obj_in_hsp.position_subject in coord_exons_dic:
al_obj_in_hsp.set_exons(coord_exons_dic[al_obj_in_hsp.position_subject])
# global & local gff output
extract_and_write_gff(alignment_object_list, out_global_gff3_hlr, out_local_gff3_hlr)
if options.ONLY_BEST_ALIGNMENT:
break
out_local_gff3_hlr.close()
out_global_gff3_hlr.close()
out_fasta_hlr.close()
# --- closing program
t_end = time.time()
sys.stdout.write("\n\nWork done...")
sys.stdout.write("\nProcess time [s]: " + str(t_end - t_st))
if __name__ == "__main__":
main(sys.argv[1:])
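# Illustrative invocation (file names are placeholders):
#   python map_seq_on_exons_by_blast.py -r reference.fasta -g reference_exons.gff3 \
#       -q transcripts.fasta -b blastdb/reference -o ./results -e 1e-8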
|
molecol/targeted-resequencing-with-mips
|
exon_boundaries_on_transcript/map_seq_on_exons_by_blast.py
|
Python
|
mit
| 17,633
|
[
"BLAST"
] |
136f06e6df7fcfcb215cb43998a3b4874e4d25a250b1aaf587c471474594f00a
|
##
# Copyright 2009-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing GAMESS-US, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Toon Willems (Ghent University)
@author: Pablo Escobar (sciCORE, SIB, University of Basel)
@author: Benjamin Roberts (The University of Auckland)
"""
import fileinput
import glob
import os
import re
import shutil
import sys
import tempfile
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM, MANDATORY
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import mkdir, read_file, write_file
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd, run_cmd_qa
from easybuild.tools.systemtools import get_platform_name
class EB_GAMESS_minus_US(EasyBlock):
"""Support for building/installing GAMESS-US."""
@staticmethod
def extra_options():
"""Define custom easyconfig parameters for GAMESS-US."""
extra_vars = {
'ddi_comm': ['mpi', "DDI communication layer to use", CUSTOM],
'maxcpus': [None, "Maximum number of cores per node", MANDATORY],
'maxnodes': [None, "Maximum number of nodes", MANDATORY],
'runtest': [True, "Run GAMESS-US tests", CUSTOM],
'scratch_dir': ['$TMPDIR', "Scratch dir to be used in rungms script", CUSTOM],
}
return EasyBlock.extra_options(extra_vars)
def __init__(self, *args, **kwargs):
"""Easyblock constructor, enable building in installation directory."""
super(EB_GAMESS_minus_US, self).__init__(*args, **kwargs)
self.build_in_installdir = True
self.testdir = None
if self.cfg['runtest']:
self.testdir = tempfile.mkdtemp()
# make sure test dir doesn't contain [ or ], rungms csh script doesn't handle that well ("set: No match")
if re.search(r'[\[\]]', self.testdir):
raise EasyBuildError("Temporary dir for tests '%s' will cause problems with rungms csh script", self.testdir)
def extract_step(self):
"""Extract sources."""
# strip off 'gamess' part to avoid having everything in a 'gamess' subdirectory
self.cfg['unpack_options'] = "--strip-components=1"
super(EB_GAMESS_minus_US, self).extract_step()
def configure_step(self):
"""Configure GAMESS-US build via provided interactive 'config' script."""
# machine type
platform_name = get_platform_name()
x86_64_linux_re = re.compile('^x86_64-.*$')
if x86_64_linux_re.match(platform_name):
machinetype = "linux64"
else:
raise EasyBuildError("Build target %s currently unsupported", platform_name)
# compiler config
comp_fam = self.toolchain.comp_family()
fortran_comp, fortran_ver = None, None
if comp_fam == toolchain.INTELCOMP:
fortran_comp = 'ifort'
(out, _) = run_cmd("ifort -v", simple=False)
res = re.search(r"^ifort version ([0-9]+)\.[0-9.]+$", out)
if res:
fortran_ver = res.group(1)
else:
raise EasyBuildError("Failed to determine ifort major version number")
elif comp_fam == toolchain.GCC:
fortran_comp = 'gfortran'
fortran_ver = '.'.join(get_software_version('GCC').split('.')[:2])
else:
raise EasyBuildError("Compiler family '%s' currently unsupported.", comp_fam)
# math library config
known_mathlibs = ['imkl', 'OpenBLAS', 'ATLAS', 'ACML']
mathlib, mathlib_root = None, None
for mathlib in known_mathlibs:
mathlib_root = get_software_root(mathlib)
if mathlib_root is not None:
break
if mathlib_root is None:
raise EasyBuildError("None of the known math libraries (%s) available, giving up.", known_mathlibs)
if mathlib == 'imkl':
mathlib = 'mkl'
mathlib_root = os.path.join(mathlib_root, 'mkl')
else:
mathlib = mathlib.lower()
# verify selected DDI communication layer
known_ddi_comms = ['mpi', 'mixed', 'shmem', 'sockets']
if not self.cfg['ddi_comm'] in known_ddi_comms:
raise EasyBuildError("Unsupported DDI communication layer specified (known: %s): %s",
known_ddi_comms, self.cfg['ddi_comm'])
# MPI library config
mpilib, mpilib_root, mpilib_path = None, None, None
if self.cfg['ddi_comm'] == 'mpi':
known_mpilibs = ['impi', 'OpenMPI', 'MVAPICH2', 'MPICH2']
for mpilib in known_mpilibs:
mpilib_root = get_software_root(mpilib)
if mpilib_root is not None:
break
if mpilib_root is None:
raise EasyBuildError("None of the known MPI libraries (%s) available, giving up.", known_mpilibs)
mpilib_path = mpilib_root
if mpilib == 'impi':
mpilib_path = os.path.join(mpilib_root, 'intel64')
else:
mpilib = mpilib.lower()
# run interactive 'config' script to generate install.info file
cmd = "%(preconfigopts)s ./config %(configopts)s" % {
'preconfigopts': self.cfg['preconfigopts'],
'configopts': self.cfg['configopts'],
}
qa = {
"After the new window is open, please hit <return> to go on.": '',
"please enter your target machine name: ": machinetype,
"Version? [00] ": self.version,
"Please enter your choice of FORTRAN: ": fortran_comp,
"hit <return> to continue to the math library setup.": '',
"MKL pathname? ": mathlib_root,
"MKL version (or 'skip')? ": 'skip',
"please hit <return> to compile the GAMESS source code activator": '',
"please hit <return> to set up your network for Linux clusters.": '',
"communication library ('sockets' or 'mpi')? ": self.cfg['ddi_comm'],
"Enter MPI library (impi, mvapich2, mpt, sockets):": mpilib,
"Please enter your %s's location: " % mpilib: mpilib_root,
"Do you want to try LIBCCHEM? (yes/no): ": 'no',
"Enter full path to OpenBLAS libraries (without 'lib' subdirectory):": mathlib_root,
}
stdqa = {
r"GAMESS directory\? \[.*\] ": self.builddir,
r"GAMESS build directory\? \[.*\] ": self.installdir, # building in install directory
r"Enter only the main version number, such as .*\nVersion\? ": fortran_ver,
r"gfortran version.\nPlease enter only the first decimal place, such as .*:": fortran_ver,
"Enter your choice of 'mkl' or .* 'none': ": mathlib,
}
run_cmd_qa(cmd, qa=qa, std_qa=stdqa, log_all=True, simple=True)
self.log.debug("Contents of install.info:\n%s" % read_file(os.path.join(self.builddir, 'install.info')))
# patch hardcoded settings in rungms to use values specified in easyconfig file
rungms = os.path.join(self.builddir, 'rungms')
extra_gmspath_lines = "set ERICFMT=$GMSPATH/auxdata/ericfmt.dat\nset MCPPATH=$GMSPATH/auxdata/MCP\n"
try:
for line in fileinput.input(rungms, inplace=1, backup='.orig'):
line = re.sub(r"^(\s*set\s*TARGET)=.*", r"\1=%s" % self.cfg['ddi_comm'], line)
line = re.sub(r"^(\s*set\s*GMSPATH)=.*", r"\1=%s\n%s" % (self.installdir, extra_gmspath_lines), line)
line = re.sub(r"(null\) set VERNO)=.*", r"\1=%s" % self.version, line)
line = re.sub(r"^(\s*set DDI_MPI_CHOICE)=.*", r"\1=%s" % mpilib, line)
line = re.sub(r"^(\s*set DDI_MPI_ROOT)=.*%s.*" % mpilib.lower(), r"\1=%s" % mpilib_path, line)
line = re.sub(r"^(\s*set GA_MPI_ROOT)=.*%s.*" % mpilib.lower(), r"\1=%s" % mpilib_path, line)
# comment out all adjustments to $LD_LIBRARY_PATH that involves hardcoded paths
line = re.sub(r"^(\s*)(setenv\s*LD_LIBRARY_PATH\s*/.*)", r"\1#\2", line)
if self.cfg['scratch_dir']:
line = re.sub(r"^(\s*set\s*SCR)=.*", r"\1=%s" % self.cfg['scratch_dir'], line)
line = re.sub(r"^(\s*set\s*USERSCR)=.*", r"\1=%s" % self.cfg['scratch_dir'], line)
sys.stdout.write(line)
except IOError, err:
raise EasyBuildError("Failed to patch %s: %s", rungms, err)
def build_step(self):
"""Custom build procedure for GAMESS-US: using compddi, compall and lked scripts."""
compddi = os.path.join(self.cfg['start_dir'], 'ddi', 'compddi')
run_cmd(compddi, log_all=True, simple=True)
# make sure the libddi.a library is present
libddi = os.path.join(self.cfg['start_dir'], 'ddi', 'libddi.a')
if not os.path.isfile(libddi):
raise EasyBuildError("The libddi.a library (%s) was never built", libddi)
else:
self.log.info("The libddi.a library (%s) was successfully built." % libddi)
compall_cmd = os.path.join(self.cfg['start_dir'], 'compall')
compall = "%s %s %s" % (self.cfg['prebuildopts'], compall_cmd, self.cfg['buildopts'])
run_cmd(compall, log_all=True, simple=True)
cmd = "%s gamess %s" % (os.path.join(self.cfg['start_dir'], 'lked'), self.version)
run_cmd(cmd, log_all=True, simple=True)
def test_step(self):
"""Run GAMESS-US tests (if 'runtest' easyconfig parameter is set to True)."""
# don't use provided 'runall' script for tests, since that only runs the tests single-core
if self.cfg['runtest']:
try:
cwd = os.getcwd()
os.chdir(self.testdir)
except OSError, err:
raise EasyBuildError("Failed to move to temporary directory for running tests: %s", err)
# copy input files for exam<id> standard tests
for test_input in glob.glob(os.path.join(self.installdir, 'tests', 'standard', 'exam*.inp')):
try:
shutil.copy2(test_input, os.getcwd())
except OSError, err:
raise EasyBuildError("Failed to copy %s to %s: %s", test_input, os.getcwd(), err)
rungms = os.path.join(self.installdir, 'rungms')
test_env_vars = ['TMPDIR=%s' % self.testdir]
if self.toolchain.mpi_family() == toolchain.INTELMPI:
test_env_vars.extend([
'I_MPI_FALLBACK=enable', # enable fallback in case first fabric fails (see $I_MPI_FABRICS_LIST)
'I_MPI_HYDRA_BOOTSTRAP=fork', # tests are only run locally (2 processes), so no SSH required
])
# run all exam<id> tests, dump output to exam<id>.log
n_tests = 47
for i in range(1, n_tests+1):
test_cmd = ' '.join(test_env_vars + [rungms, 'exam%02d' % i, self.version, '1', '2'])
(out, _) = run_cmd(test_cmd, log_all=True, simple=False)
write_file('exam%02d.log' % i, out)
# verify output of tests
check_cmd = os.path.join(self.installdir, 'tests', 'standard', 'checktst')
(out, _) = run_cmd(check_cmd, log_all=True, simple=False)
success_regex = re.compile("^All %d test results are correct" % n_tests, re.M)
if success_regex.search(out):
self.log.info("All tests ran successfully!")
else:
raise EasyBuildError("Not all tests ran successfully...")
# cleanup
os.chdir(cwd)
try:
shutil.rmtree(self.testdir)
except OSError, err:
raise EasyBuildError("Failed to remove test directory %s: %s", self.testdir, err)
def install_step(self):
"""Skip install step, since we're building in the install directory."""
pass
def sanity_check_step(self):
"""Custom sanity check for GAMESS-US."""
custom_paths = {
'files': ['gamess.%s.x' % self.version, 'rungms'],
'dirs': [],
}
super(EB_GAMESS_minus_US, self).sanity_check_step(custom_paths=custom_paths)
def make_module_extra(self):
"""Define GAMESS-US specific variables in generated module file, i.e. $GAMESSUSROOT."""
txt = super(EB_GAMESS_minus_US, self).make_module_extra()
txt += self.module_generator.set_environment('GAMESSUSROOT', self.installdir)
txt += self.module_generator.prepend_paths("PATH", [''])
return txt
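# Illustrative easyconfig fragment exercising the custom parameters defined in
# extra_options() above (values are placeholders, not a tested configuration):
#
#   name = 'GAMESS-US'
#   maxcpus = '16'              # mandatory
#   maxnodes = '1'              # mandatory
#   ddi_comm = 'mpi'
#   scratch_dir = '/scratch/$USER'
#   runtest = True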
|
hpcleuven/easybuild-easyblocks
|
easybuild/easyblocks/g/gamess_us.py
|
Python
|
gpl-2.0
| 14,039
|
[
"GAMESS"
] |
2a4e57825d7a878c7bebf6d18564b066dd05f9567626806564ee16175230c172
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# main.py
#
# Copyright 2015 Eusebio Aguilera <eusebio.aguilera@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
__author__ = "Eusebio J. Aguilera Aguilera"
__copyright__ = "Eusebio J. Aguilera Aguilera"
__credits__ = "Eusebio J. Aguilera Aguilera"
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Eusebio J. Aguilera Aguilera"
import os
from os.path import join
import time
from skimage import io
import numpy as np
from matplotlib import pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn import tree
from sklearn.dummy import DummyClassifier
from prettytable import PrettyTable
from tools.features import LBPUFeatures, LBPUMultiblockFeatures, HOGFeatures, LBPUMultiBlockAndHOGFeatures
# This delta values are used to extract the images inside the big image
TRAIN_DELTA = 28
TEST_DELTA = 28
"""
This function is used to extract the little examples in the big image
The param is a numpy array
"""
def extract_images(img):
imgs = []
x, y = img.shape
    for i in range(0, x, TRAIN_DELTA):
        for j in range(0, y, TRAIN_DELTA):
            # keep only the non-empty tiles, using the full TRAIN_DELTA-sized window
            if np.sum(img[i:i+TRAIN_DELTA, j:j+TRAIN_DELTA]) > 0:
                imgs.append(img[i:i+TRAIN_DELTA, j:j+TRAIN_DELTA])
    return imgs
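# Minimal sketch of extract_images() on synthetic data (illustrative only):
# a 56x56 array holding one non-empty 28x28 tile yields a single sub-image.
#
#   demo = np.zeros((56, 56))
#   demo[0:28, 0:28] = 1
#   tiles = extract_images(demo)   # -> list with one 28x28 tile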
"""
This function is used to train a model using a classifier
"""
def test_classifiers(train_path, test_path):
myfeat_train = list()
mylabel_train = list()
myfeat_test = list()
mylabel_test = list()
for i in range(10):
fname = join(train_path, '%s.jpg' % str(i))
img = io.imread(fname)
# Extract images
imgs = extract_images(img)
# Extract features
for x in imgs:
#tmp = LBPUFeatures(x)
#tmp = LBPUMultiblockFeatures(x)
#tmp = HOGFeatures(x)
tmp = LBPUMultiBlockAndHOGFeatures(x)
feat = tmp.getFeatures()
myfeat_train.append(feat)
mylabel_train.append(i)
print "Features obtained for train class", i
for i in range(10):
fname = join(test_path, '%s.jpg' % str(i))
img = io.imread(fname)
# Extract images
imgs = extract_images(img)
# Extract features
for x in imgs:
#lbp = extract_features(x)
#tmp = LBPUMultiblockFeatures(x)
#tmp = HOGFeatures(x)
tmp = LBPUMultiBlockAndHOGFeatures(x)
feat = tmp.getFeatures()
myfeat_test.append(feat)
mylabel_test.append(i)
print "Features obtained for test class", i
# Train
# Create a classifier: a support vector classifier
svml = LinearSVC()
rf = RandomForestClassifier()
gnb = GaussianNB()
tr = tree.DecisionTreeClassifier()
dummy = DummyClassifier()
print "Training ..."
# Train
# Compute traing time
ttime = []
tt = time.time()
gnb.fit(myfeat_train, mylabel_train)
ttime.append(time.time()-tt)
    # keep the fit order aligned with 'names' and the scores below: GNB, RF, SVM, DT, Dummy
    tt = time.time()
    rf.fit(myfeat_train, mylabel_train)
    ttime.append(time.time()-tt)
    tt = time.time()
    svml.fit(myfeat_train, mylabel_train)
    ttime.append(time.time()-tt)
tt = time.time()
tr.fit(myfeat_train, mylabel_train)
ttime.append(time.time()-tt)
tt = time.time()
dummy.fit(myfeat_train, mylabel_train)
ttime.append(time.time()-tt)
print "Classifying ..."
names = ["Gaussian Naive Bayes", "Random Forest", "SVM", "Decision Tree", "Dummy (Baseline)"]
colors = ["r", "b", "g", "m", "k"]
scores = []
ctime = []
tt = time.time()
scores.append(gnb.score(myfeat_test, mylabel_test))
ctime.append(time.time()-tt)
tt = time.time()
scores.append(rf.score(myfeat_test, mylabel_test))
ctime.append(time.time()-tt)
tt = time.time()
scores.append(svml.score(myfeat_test, mylabel_test))
ctime.append(time.time()-tt)
tt = time.time()
scores.append(tr.score(myfeat_test, mylabel_test))
ctime.append(time.time()-tt)
tt = time.time()
scores.append(dummy.score(myfeat_test, mylabel_test))
ctime.append(time.time()-tt)
pt = PrettyTable(["Classifier", "Score", "Training time (s)", "Classifying time (s)", "Total time (s)"])
for i in range(len(names)):
pt.add_row([names[i], scores[i], ttime[i], ctime[i], ttime[i]+ctime[i]])
print pt
for i in range(len(names)):
plt.bar(i, scores[i], label=names[i], color=colors[i])
plt.legend()
plt.show()
def main():
current = os.path.dirname(os.path.abspath(__file__))
train_path = join(current, 'dataset/train/')
test_path = join(current, 'dataset/test/')
test_classifiers(train_path, test_path)
return 0
if __name__ == '__main__':
main()
|
eusebioaguilera/cvsamples
|
handwriting_recognition/main.py
|
Python
|
lgpl-3.0
| 5,553
|
[
"Gaussian"
] |
025203632dc1d6cd78b1d085d7d23662e4aa3760bbf592d288a92ee89d18e8ad
|
import numpy
import numpy.linalg
import scipy.linalg
import scipy.interpolate
from scipy.signal import wiener, filtfilt, butter, gaussian
from scipy.ndimage import filters
from matplotlib import pyplot as plt
plt.style.use('classic')
from assimulo.solvers import IDA
from assimulo.problem import Implicit_Problem
from scipy.sparse.linalg import spsolve as sparseSolve
from scipy.sparse import csr_matrix as sparseMat
import scipy.sparse as sparse
import math
from copy import deepcopy
def ButterworthFilter( x, y, ff=0.2 ) :
b, a = butter(1, ff)
fl = filtfilt( b, a, y )
return fl
def get_smooth_Uref_data( Ua_path, Uc_path, ffa=0.4, ffc=0.2 ) :
"""
Smooth the Uref data to aid in improving numerical stability.
This should be verified by the user to ensure it is not changing the original
Uref data beyond a tolerable amount (defined by the user).
A linear interpolator class is output for Uref and dUref_dx for both anode
and cathode.
"""
## Load the data files
uref_a_map = numpy.loadtxt( Ua_path, delimiter=',' )
    uref_c_map = numpy.loadtxt( Uc_path, delimiter=',' )
if uref_a_map[1,0] < uref_a_map[0,0] :
uref_a_map = numpy.flipud( uref_a_map )
if uref_c_map[1,0] < uref_c_map[0,0] :
uref_c_map = numpy.flipud( uref_c_map )
xa = uref_a_map[:,0]
xc = uref_c_map[:,0]
# big_xa = numpy.linspace( xa[0], xa[-1], 300 )
# big_xc = numpy.linspace( xc[0], xc[-1], 300 )
# big_Ua = numpy.interp( big_xa, xa, uref_a_map[:,1] )
# big_Uc = numpy.interp( big_xc, xc, uref_c_map[:,1] )
# numpy.savetxt( bsp_dir + '/data/Model_v1/Model_Pars/solid/thermodynamics/uref_anode_bigx.csv', numpy.array([big_xa, big_Ua]).T, delimiter=',' )
# numpy.savetxt( bsp_dir + '/data/Model_v1/Model_Pars/solid/thermodynamics/uref_cathode_bigx.csv', numpy.array([big_xc, big_Uc]).T, delimiter=',' )
## Smooth the signals
Ua_butter = ButterworthFilter( xa, uref_a_map[:,1], ff=ffa )
Uc_butter = ButterworthFilter( xc, uref_c_map[:,1], ff=ffc )
## Create the interpolators
Ua_intp = scipy.interpolate.interp1d( xa, Ua_butter, kind='linear' )
Uc_intp = scipy.interpolate.interp1d( xc, Uc_butter, kind='linear' )
# duref_a_map = numpy.gradient( uref_a_map[:,1] ) / numpy.gradient( xa )
# duref_c_map = numpy.gradient( uref_c_map[:,1] ) / numpy.gradient( xc )
duref_a = numpy.gradient( Ua_butter ) / numpy.gradient( xa )
duref_c = numpy.gradient( Uc_butter ) / numpy.gradient( xc )
dUa_intp = scipy.interpolate.interp1d( xa, duref_a, kind='linear' )
dUc_intp = scipy.interpolate.interp1d( xc, duref_c, kind='linear' )
# # Plot the Uref data for verification
# plt.figure()
# plt.plot( xa, uref_a_map[:,1], label='Ua map' )
# plt.plot( xc, uref_c_map[:,1], label='Uc map' )
## plt.plot( xa, Ua_butter, label='Ua butter' )
## plt.plot( xc, Uc_butter, label='Uc butter' )
# plt.plot( xa, self.uref_a_interp(xa), label='Ua interp lin' )
# plt.plot( xc, self.uref_c_interp(xc), label='Uc interp lin' )
# plt.legend()
# plt.figure()
# plt.plot( xa, duref_a_map, label='dUa map' )
# plt.plot( xc, duref_c_map, label='dUc map' )
## plt.plot( xa, duref_a_b, label='dUa B' )
## plt.plot( xc, duref_c_b, label='dUc B' )
# plt.plot( xa, self.duref_a_interp(xa), label='dUa interp butter' )
# plt.plot( xc, self.duref_c_interp(xc), label='dUc interp butter' )
# plt.legend()
# plt.show()
return Ua_intp, Uc_intp, dUa_intp, dUc_intp
def nonlinspace( Rf,k,N ) :
r = numpy.zeros(N)
for i in range(N) :
r[i] = (1./k)**(-i)
if k!=1 :
r=max(r)-r
r=r/max(r)*Rf
else :
r=r*Rf
return r
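# Illustrative check (hedged sketch; _demo_r is hypothetical): the mesh
# points run from 0 to Rf with spacing that shrinks by roughly the
# factor k toward the outer radius, refining the grid near the particle
# surface where solid-phase concentration gradients are steepest.
_demo_r = nonlinspace(1.0, 0.8, 4)
# _demo_r is approximately [0.0, 0.41, 0.74, 1.0]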
def mid_to_edge( var_mid, x_e ) :
var_edge = numpy.array( [var_mid[0]] + [ var_mid[i]*var_mid[i+1]/( ((x_e[i+1]-x_e[i])/((x_e[i+2]-x_e[i+1])+(x_e[i+1]-x_e[i])))*var_mid[i+1] + (1- ((x_e[i+1]-x_e[i])/((x_e[i+2]-x_e[i+1])+(x_e[i+1]-x_e[i]))))*var_mid[i] ) for i in range(len(var_mid)-1) ] + [var_mid[-1]] )
return var_edge
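# Note: mid_to_edge maps cell-centered properties onto interior cell
# edges with a distance-weighted harmonic mean (preserving flux
# continuity across control-volume faces) and copies the adjacent
# midpoint value onto the two outermost edges.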
def flux_mat_builder( N, x_m, vols, P ) :
A = numpy.zeros([N,N], dtype='d')
for i in range(1,N-1) :
A[i,i-1] = (1./vols[i]) * (P[i ]) / (x_m[i ] - x_m[i-1])
A[i,i ] = -(1./vols[i]) * (P[i ]) / (x_m[i ] - x_m[i-1]) - (1./vols[i]) * (P[i+1]) / (x_m[i+1] - x_m[i])
A[i,i+1] = (1./vols[i]) * (P[i+1]) / (x_m[i+1] - x_m[i ])
i=0
A[0,0] = -(1./vols[i]) * (P[i+1]) / (x_m[i+1] - x_m[i])
A[0,1] = (1./vols[i]) * (P[i+1]) / (x_m[i+1] - x_m[i])
i=N-1
A[i,i-1] = (1./vols[i]) * (P[i]) / (x_m[i] - x_m[i-1])
A[i,i ] = -(1./vols[i]) * (P[i]) / (x_m[i] - x_m[i-1])
return A
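# Illustrative check (hedged sketch; the _demo_A name is hypothetical):
# on a uniform unit mesh the operator reduces to the standard
# second-difference matrix with zero-flux (Neumann) boundary rows.
_demo_A = flux_mat_builder(3, numpy.array([0.5, 1.5, 2.5]), numpy.ones(3), numpy.ones(4))
# _demo_A == [[-1, 1, 0], [1, -2, 1], [0, 1, -1]]; each row sums to
# zero, so a spatially uniform field produces no net flux.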
class MyProblem( Implicit_Problem ) :
def __init__(self, Na, Ns, Nc, Nra, Nrc, X, Ra, Rc, Ac, bsp_dir, y0, yd0, name ) :
Implicit_Problem.__init__(self,y0=y0,yd0=yd0,name=name)
self.T = 298.15 # Cell temperature, [K]
self.Ac = Ac # Cell coated area, [m^2]
# Control volumes and node points (mid node points and edge node points)
self.Ns = Ns
self.Na = Na
self.Nc = Nc
self.N = Na + Ns + Nc
self.X = X
self.x_e = numpy.linspace( 0.0, X, self.N+1 )
self.x_m = numpy.array( [ 0.5*(self.x_e[i+1]+self.x_e[i]) for i in range(self.N) ], dtype='d' )
self.vols = numpy.array( [ (self.x_e[i+1] - self.x_e[i]) for i in range(self.N)], dtype='d' )
# Radial mesh
self.Nra = Nra
self.Nrc = Nrc
k=0.8
self.r_e_a = nonlinspace( Ra, k, Nra )
self.r_m_a = numpy.array( [ 0.5*(self.r_e_a[i+1]+self.r_e_a[i]) for i in range(Nra-1) ], dtype='d' )
self.r_e_c = nonlinspace( Rc, k, Nrc )
self.r_m_c = numpy.array( [ 0.5*(self.r_e_c[i+1]+self.r_e_c[i]) for i in range(Nrc-1) ], dtype='d' )
self.vols_ra_e = numpy.array( [1/3.*(self.r_m_a[0]**3)] + [ 1/3.*(self.r_m_a[i+1] **3 - self.r_m_a[i] **3) for i in range(Nra-2)] + [1/3.*(self.r_e_a[-1]**3 - self.r_m_a[-1]**3)], dtype='d' )
self.vols_rc_e = numpy.array( [1/3.*(self.r_m_c[0]**3)] + [ 1/3.*(self.r_m_c[i+1] **3 - self.r_m_c[i] **3) for i in range(Nrc-2)] + [1/3.*(self.r_e_c[-1]**3 - self.r_m_c[-1]**3)], dtype='d' )
self.vols_ra_m = numpy.array( [ 1/3.*(self.r_e_a[i+1]**3 - self.r_e_a[i]**3) for i in range(Nra-1)], dtype='d' )
self.vols_rc_m = numpy.array( [ 1/3.*(self.r_e_c[i+1]**3 - self.r_e_c[i]**3) for i in range(Nrc-1)], dtype='d' )
# Useful sub-meshes for the phi_s functions
self.x_m_a = self.x_m[:Na]
self.x_m_c = self.x_m[-Nc:]
self.x_e_a = self.x_e[:Na+1]
self.x_e_c = self.x_e[-Nc-1:]
self.vols_a = self.vols[:Na]
self.vols_c = self.vols[-Nc:]
self.num_diff_vars = self.N + self.Nra*self.Na + self.Nrc*self.Nc
self.num_algr_vars = self.N + self.Na+self.Nc
# Volume fraction vectors and matrices for effective parameters
self.La, self.Ls, self.Lc = self.Na*X/self.N, self.Ns*X/self.N, self.Nc*X/self.N
self.Na, self.Ns, self.Nc = Na, Ns, Nc
eps_a = 0.3
eps_s = 0.5
eps_c = 0.25
ba, bs, bc = 0.8, 0.5, 0.5
eps_a_vec = [ eps_a for i in range(Na) ] # list( eps_a + eps_a/2.*numpy.sin(numpy.linspace(0.,Na/4,Na)) ) # list(eps_a + eps_a*numpy.random.randn(Na)/5.) #
eps_s_vec = [ eps_s for i in range(Ns) ]
eps_c_vec = [ eps_c for i in range(Nc) ] # list( eps_c + eps_c/2.*numpy.sin(numpy.linspace(0.,Nc/4,Nc)) ) # list(eps_c + eps_c*numpy.random.randn(Nc)/5.) #
self.eps_m = numpy.array( eps_a_vec + eps_s_vec + eps_c_vec, dtype='d' )
self.k_m = 1./self.eps_m
self.eps_mb = numpy.array( [ ea**ba for ea in eps_a_vec ] + [ es**bs for es in eps_s_vec ] + [ ec**bc for ec in eps_c_vec ], dtype='d' )
self.eps_eff = numpy.array( [ ea**(1.+ba) for ea in eps_a_vec ] + [ es**(1.+bs) for es in eps_s_vec ] + [ ec**(1.+bc) for ec in eps_c_vec ], dtype='d' )
self.eps_a_eff = self.eps_eff[:Na]
self.eps_c_eff = self.eps_eff[-Nc:]
self.K_m = numpy.diag( self.k_m )
t_plus = 0.4
F = 96485.0
self.t_plus = t_plus
self.F = F
self.R_gas = 8.314
self.Rp_a = Ra
self.Rp_c = Rc
as_a = 3.*numpy.array(eps_a_vec, dtype='d')/self.Rp_a
as_c = 3.*numpy.array(eps_c_vec, dtype='d')/self.Rp_c
self.as_a = as_a
self.as_c = as_c
self.as_a_mean = 1./self.La*sum( [ asa*v for asa,v in zip(as_a, self.vols[:Na]) ] )
self.as_c_mean = 1./self.Lc*sum( [ asc*v for asc,v in zip(as_c, self.vols[-Nc:]) ] )
print('asa diff', self.as_a_mean - as_a[0])
print('asc diff', self.as_c_mean - as_c[0])
# Electrolyte constant B_ce matrix
Ba = [ (1.-t_plus)*asa/ea for ea, asa in zip(eps_a_vec,as_a) ]
Bs = [ 0.0 for i in range(Ns) ]
Bc = [ (1.-t_plus)*asc/ec for ec, asc in zip(eps_c_vec,as_c) ]
self.B_ce = numpy.diag( numpy.array(Ba+Bs+Bc, dtype='d') )
Bap = [ asa*F for asa in as_a ]
Bsp = [ 0.0 for i in range(Ns) ]
Bcp = [ asc*F for asc in as_c ]
self.B2_pe = numpy.diag( numpy.array(Bap+Bsp+Bcp, dtype='d') )
# Solid phase parameters and j vector matrices
self.sig_a = 100. # [S/m]
self.sig_c = 100. # [S/m]
self.sig_a_eff = self.sig_a * self.eps_a_eff
self.sig_c_eff = self.sig_c * self.eps_c_eff
self.A_ps_a = flux_mat_builder( self.Na, self.x_m_a, numpy.ones_like(self.vols_a), self.sig_a_eff )
self.A_ps_c = flux_mat_builder( self.Nc, self.x_m_c, numpy.ones_like(self.vols_c), self.sig_c_eff )
# Grounding form for BCs (was only needed during testing, before BVK was incorporated for coupling)
self.A_ps_a[-1,-1] = 2*self.A_ps_a[-1,-1]
self.A_ps_c[ 0, 0] = 2*self.A_ps_c[ 0, 0]
Baps = numpy.array( [ asa*F*dxa for asa,dxa in zip(as_a, self.vols_a) ], dtype='d' )
Bcps = numpy.array( [ asc*F*dxc for asc,dxc in zip(as_c, self.vols_c) ], dtype='d' )
self.B_ps_a = numpy.diag( Baps )
self.B_ps_c = numpy.diag( Bcps )
self.B2_ps_a = numpy.zeros( self.Na, dtype='d' )
self.B2_ps_a[ 0] = -1.
self.B2_ps_c = numpy.zeros( self.Nc, dtype='d' )
self.B2_ps_c[-1] = -1.
# Solid phase diffusion model
Dsa = 1e-12
Dsc = 1e-14
self.Dsa = Dsa
self.Dsc = Dsc
self.csa_max = 30555.0 # [mol/m^3]
self.csc_max = 51554.0 # [mol/m^3]
# Two parameter Solid phase diffusion model
# self.B_cs_a = numpy.diag( numpy.array( [-3.0/self.Rp_a for i in range(Na)], dtype='d' ) )
# self.B_cs_c = numpy.diag( numpy.array( [-3.0/self.Rp_c for i in range(Nc)], dtype='d' ) )
# self.C_cs_a = numpy.eye(Na)
# self.C_cs_c = numpy.eye(Nc)
# self.D_cs_a = numpy.diag( numpy.array( [-self.Rp_a/Dsa/5.0 for i in range(Na)], dtype='d' ) )
# self.D_cs_c = numpy.diag( numpy.array( [-self.Rp_c/Dsc/5.0 for i in range(Nc)], dtype='d' ) )
# 1D spherical diffusion model
self.A_csa_single = self.build_Ac_mat( Nra, Dsa*numpy.ones_like(self.r_m_a), self.r_m_a, self.r_e_a, self.vols_ra_e )
self.A_csc_single = self.build_Ac_mat( Nrc, Dsc*numpy.ones_like(self.r_m_c), self.r_m_c, self.r_e_c, self.vols_rc_e )
# b = self.A_csa_single.reshape(1,Nra,Nra).repeat(Na,axis=0)
b = [self.A_csa_single]*Na
self.A_cs_a = scipy.linalg.block_diag( *b )
b = [self.A_csc_single]*Nc
self.A_cs_c = scipy.linalg.block_diag( *b )
B_csa_single = numpy.array( [ 0. for i in range(Nra) ], dtype='d' )
B_csa_single[-1] = -1.*self.r_e_a[-1]**2
A1 = self.build_Mc_A1mat( self.vols_ra_e )
self.B_csa_single = A1.dot(B_csa_single)
B_csc_single = numpy.array( [ 0. for i in range(Nrc) ], dtype='d' )
B_csc_single[-1] = -1.*self.r_e_c[-1]**2
A1 = self.build_Mc_A1mat( self.vols_rc_e )
self.B_csc_single = A1.dot(B_csc_single)
b = [self.B_csa_single]*Na
self.B_cs_a = scipy.linalg.block_diag( *b ).T
b = [self.B_csc_single]*Nc
self.B_cs_c = scipy.linalg.block_diag( *b ).T
self.D_cs_a = scipy.linalg.block_diag( *[[0.0 for i in range(self.Nra-1)]+[1.0]]*Na )
self.D_cs_c = scipy.linalg.block_diag( *[[0.0 for i in range(self.Nrc-1)]+[1.0]]*Nc )
# OCV
Ua_path = bsp_dir+'data/Model_v1/Model_Pars/solid/thermodynamics/uref_anode_bigx.csv'
Uc_path = bsp_dir+'data/Model_v1/Model_Pars/solid/thermodynamics/uref_cathode_bigx.csv'
self.uref_a, self.uref_c, self.duref_a, self.duref_c = get_smooth_Uref_data( Ua_path, Uc_path, ffa=0.4, ffc=0.2 )
# Reaction kinetics parameters
self.io_a = 1.0 # [A/m^2]
self.io_c = 1.0 # [A/m^2]
## System indices
# Differential vars
self.ce_inds = range( self.N )
self.ce_inds_r = numpy.reshape( self.ce_inds, [len(self.ce_inds),1] )
self.ce_inds_c = numpy.reshape( self.ce_inds, [1,len(self.ce_inds)] )
self.csa_inds = range( self.N, self.N + (self.Na*self.Nra) )
self.csa_inds_r = numpy.reshape( self.csa_inds, [len(self.csa_inds),1] )
self.csa_inds_c = numpy.reshape( self.csa_inds, [1,len(self.csa_inds)] )
self.csc_inds = range( self.N + (self.Na*self.Nra), self.N + (self.Na*self.Nra) + (self.Nc*self.Nrc) )
self.csc_inds_r = numpy.reshape( self.csc_inds, [len(self.csc_inds),1] )
self.csc_inds_c = numpy.reshape( self.csc_inds, [1,len(self.csc_inds)] )
# Algebraic vars
c_end = self.N + (self.Na*self.Nra) + (self.Nc*self.Nrc)
self.pe_inds = range( c_end, c_end +self.N )
self.pe_inds_r = numpy.reshape( self.pe_inds, [len(self.pe_inds),1] )
self.pe_inds_c = numpy.reshape( self.pe_inds, [1,len(self.pe_inds)] )
self.pe_a_inds = range( c_end, c_end +self.Na )
self.pe_a_inds_r = numpy.reshape( self.pe_a_inds, [len(self.pe_a_inds),1] )
self.pe_a_inds_c = numpy.reshape( self.pe_a_inds, [1,len(self.pe_a_inds)] )
self.pe_c_inds = range( c_end+self.Na+self.Ns, c_end+self.Na+self.Ns +self.Nc )
self.pe_c_inds_r = numpy.reshape( self.pe_c_inds, [len(self.pe_c_inds),1] )
self.pe_c_inds_c = numpy.reshape( self.pe_c_inds, [1,len(self.pe_c_inds)] )
self.pa_inds = range( c_end+self.N, c_end+self.N +self.Na )
self.pa_inds_r = numpy.reshape( self.pa_inds, [len(self.pa_inds),1] )
self.pa_inds_c = numpy.reshape( self.pa_inds, [1,len(self.pa_inds)] )
self.pc_inds = range( c_end+self.N+self.Na, c_end+self.N+self.Na + self.Nc )
self.pc_inds_r = numpy.reshape( self.pc_inds, [len(self.pc_inds),1] )
self.pc_inds_c = numpy.reshape( self.pc_inds, [1,len(self.pc_inds)] )
# second set for manual jac version
c_end = 0
self.pe_inds2 = range( c_end, c_end +self.N )
self.pe_inds_r2 = numpy.reshape( self.pe_inds2, [len(self.pe_inds2),1] )
self.pe_inds_c2 = numpy.reshape( self.pe_inds2, [1,len(self.pe_inds2)] )
self.pe_a_inds2 = range( c_end, c_end +self.Na )
self.pe_a_inds_r2 = numpy.reshape( self.pe_a_inds2, [len(self.pe_a_inds2),1] )
self.pe_a_inds_c2 = numpy.reshape( self.pe_a_inds2, [1,len(self.pe_a_inds2)] )
self.pe_c_inds2 = range( c_end+self.Na+self.Ns, c_end+self.Na+self.Ns +self.Nc )
self.pe_c_inds_r2 = numpy.reshape( self.pe_c_inds2, [len(self.pe_c_inds2),1] )
self.pe_c_inds_c2 = numpy.reshape( self.pe_c_inds2, [1,len(self.pe_c_inds2)] )
self.pa_inds2 = range( c_end+self.N, c_end+self.N +self.Na )
self.pa_inds_r2 = numpy.reshape( self.pa_inds2, [len(self.pa_inds2),1] )
self.pa_inds_c2 = numpy.reshape( self.pa_inds2, [1,len(self.pa_inds2)] )
self.pc_inds2 = range( c_end+self.N+self.Na, c_end+self.N+self.Na + self.Nc )
self.pc_inds_r2 = numpy.reshape( self.pc_inds2, [len(self.pc_inds2),1] )
self.pc_inds_c2 = numpy.reshape( self.pc_inds2, [1,len(self.pc_inds2)] )
def set_iapp( self, I_app ) :
i_app = I_app / self.Ac
self.i_app = i_app
j_in_a = i_app / ( self.La*self.as_a_mean*self.F )
j_in_c = -i_app / ( self.Lc*self.as_c_mean*self.F )
# Set the input j
ja = [ j_in_a for i in range(self.Na) ]
js = [ 0.0 for i in range(self.Ns) ]
jc = [ j_in_c for i in range(self.Nc) ]
self.j = numpy.array( ja+js+jc, dtype='d' )
self.j_a = numpy.array( ja, dtype='d' )
self.j_c = numpy.array( jc, dtype='d' )
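# Note (sketch): set_iapp converts the applied cell current I_app [A]
# into a uniform pore-wall molar flux j [mol/(m^2 s)] on each electrode
# via j = i_app / (L * a_s * F), with opposite signs on anode and
# cathode so that total charge is conserved.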
# cs mats
def build_Mc_A1mat( self, V ) :
M1 = numpy.zeros( [len(V), len(V)] )
M2 = numpy.diag( V )
M1[ 0,[0 , 1]] = [3/8., 1/8.]
M1[-1,[-2,-1]] = [1/8., 3/8.]
for i in range(1,len(V)-1) :
M1[i,[i-1,i,i+1]] = [ 1/8., 6/8., 1/8. ]
Mc = M1.dot(M2)
A1 = numpy.linalg.inv( Mc )
return A1
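# Note (sketch): M1 applies a 1/8-6/8-1/8 neighbour weighting (3/8-1/8
# at the boundaries) and M2 scales by the control volumes V, so
# A1 = inv(M1.dot(M2)) is the inverted mass matrix that build_Ac_mat
# later applies to the flux operator to recover concentration rates.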
def build_A2_mat( self, N, D, r_mid, r_edge, vols ) :
A2 = numpy.zeros( [N,N] )
for i in range(1,N-1) :
A2[i,i-1] = (D[i-1]*(r_mid[i-1]**2)) / (r_edge[i ] - r_edge[i-1])
A2[i,i ] = -(D[i-1]*(r_mid[i-1]**2)) / (r_edge[i ] - r_edge[i-1]) - (D[i]*(r_mid[i]**2)) / (r_edge[i+1] - r_edge[i])
A2[i,i+1] = (D[i ]*(r_mid[i ]**2)) / (r_edge[i+1] - r_edge[i ])
i=0
A2[0,0] = -(D[i]*(r_mid[i]**2)) / (r_edge[i+1] - r_edge[i])
A2[0,1] = (D[i]*(r_mid[i]**2)) / (r_edge[i+1] - r_edge[i])
i=N-1
A2[i,i-1] = (D[i-1]*(r_mid[i-1]**2)) / (r_edge[i] - r_edge[i-1])
A2[i,i ] = -(D[i-1]*(r_mid[i-1]**2)) / (r_edge[i] - r_edge[i-1])
return A2
def build_Ac_mat( self, N, D_mid, r_mid, r_edge, vols_edge ) :
A1 = self.build_Mc_A1mat( vols_edge )
A2 = self.build_A2_mat( N, D_mid, r_mid, r_edge, vols_edge )
Ac = A1.dot(A2)
return Ac
## Define c_e functions
def build_Ace_mat( self, c ) :
D_eff = self.Diff_ce( c )
A = self.K_m.dot( flux_mat_builder( self.N, self.x_m, self.vols, D_eff ) )
return A
def Diff_ce( self, c ) :
T = self.T
D_ce = 1e-4 * 10.0**( -4.43 - (54./(T-229.-5e-3*c)) - (0.22e-3*c) ) ## Torchio (LIONSIMBA) ECS paper
D_mid = D_ce * self.eps_eff
if type(c) == float :
D_edge = D_mid
else :
D_edge = mid_to_edge( D_mid, self.x_e )
return D_edge
## Define phi_e functions
def build_Ape_mat( self, c ) :
k_eff = self.kapp_ce( c )
A = flux_mat_builder( self.N, self.x_m, self.vols, k_eff )
A[-1,-1] = 2*A[-1,-1]
return A
def build_Bpe_mat( self, c ) :
gam = 2.*(1.-self.t_plus)*self.R_gas / self.F
k_eff = self.kapp_ce( c )
B1 = numpy.diag( 1./c ).dot( flux_mat_builder( self.N, self.x_m, self.vols, k_eff*self.T*gam ) )
return B1
def kapp_ce( self, c ) :
T = self.T
k_ce = 1e-4 * c *( -10.5 +0.668e-3*c + 0.494e-6*c**2
+ (0.074 - 1.78*1e-5*c - 8.86e-10*c**2)*T
+ (-6.96e-5 + 2.8e-8*c)*T**2 )**2 ## Torchio (LIONSIMBA) ECS paper
k_mid = k_ce * self.eps_eff
if type(c) == float :
k_edge = k_mid
else :
k_edge = mid_to_edge( k_mid, self.x_e )
return k_edge
def build_Bjac_mat( self, eta, a, b ) :
d = a*numpy.cosh( b*eta )*b
# d = a*numpy.ones_like( b*eta )*b
return numpy.diag( d )
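# Note (sketch): for a Butler-Volmer-type rate j = a*sinh(b*eta) we
# have dj/deta = a*b*cosh(b*eta), so build_Bjac_mat returns the
# diagonal Jacobian block of the reaction rate with respect to the
# local overpotential eta.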
## Define system equations
def res( self, t, y, yd ) :
## Parse out the states
# E-lyte conc
ce = y[ self.ce_inds]
c_dots = yd[self.ce_inds]
# Solid conc a:anode, c:cathode
csa = y[ self.csa_inds]
csc = y[ self.csc_inds]
csa_dt = yd[self.csa_inds]
csc_dt = yd[self.csc_inds]
# E-lyte potential
phi = y[self.pe_inds]
# Solid potential
phi_s_a = y[self.pa_inds]
phi_s_c = y[self.pc_inds]
## Grab state dependent matrices
# For E-lyte conc and potential (i.e., De(ce), kapp_e(ce))
A_ce = self.build_Ace_mat( ce )
A_pe = self.build_Ape_mat( ce )
B_pe = self.build_Bpe_mat( ce )
ja = self.j_a
jc = self.j_c
j = self.j
## Compute the residuals
# Time deriv components
r1 = c_dots - ( ((A_ce.dot(ce)).flatten() + (self.B_ce.dot(j)).flatten()) ) # E-lyte conc
r2 = csa_dt - (self.A_cs_a.dot(csa).flatten() + self.B_cs_a.dot(ja).flatten()) # Anode conc
r3 = csc_dt - (self.A_cs_c.dot(csc).flatten() + self.B_cs_c.dot(jc).flatten()) # Cathode conc
r4 = A_pe.dot(phi).flatten() - B_pe.dot(ce).flatten() + self.B2_pe.dot(j).flatten() # E-lyte potential
r5 = self.A_ps_a.dot(phi_s_a).flatten() - self.B_ps_a.dot(ja).flatten() - self.B2_ps_a*self.i_app # Anode potential
r6 = self.A_ps_c.dot(phi_s_c).flatten() - self.B_ps_c.dot(jc).flatten() + self.B2_ps_c*self.i_app # Cathode potential
res_out = numpy.concatenate( [r1, r2, r3, r4, r5, r6] )
return res_out
def jac( self, c, t, y, yd ) :
### Setup
## Parse out the states
# E-lyte conc
ce = y[ self.ce_inds]
## Grab state dependent matrices
# For E-lyte conc and potential (i.e., De(ce), kapp_e(ce))
A_ce = self.build_Ace_mat( ce )
A_pe = self.build_Ape_mat( ce )
B_pe = self.build_Bpe_mat( ce )
###
### Build the Jac matrix
## Self coupling
A_dots = numpy.diag( [1*c for i in range(self.num_diff_vars)] )
j_c = A_dots - scipy.linalg.block_diag( A_ce, self.A_cs_a, self.A_cs_c )
j = scipy.linalg.block_diag( j_c, A_pe, self.A_ps_a, self.A_ps_c )
j[ self.pe_inds_r, self.ce_inds_c ] = -B_pe
###
return j
csa_max = 30555.0 # [mol/m^3]
csc_max = 51554.0 # [mol/m^3]
#bsp_dir = '/home/m_klein/Projects/battsimpy/'
bsp_dir = '/home/mk-sim-linux/Battery_TempGrad/Python/batt_simulation/battsimpy/'
Ua_path = bsp_dir+'data/Model_v1/Model_Pars/solid/thermodynamics/uref_anode_bigx.csv'
Uc_path = bsp_dir+'data/Model_v1/Model_Pars/solid/thermodynamics/uref_cathode_bigx.csv'
uref_a, uref_c, duref_a, duref_c = get_smooth_Uref_data( Ua_path, Uc_path, ffa=0.4, ffc=0.2 )
xa_init, xc_init = 0.5, 0.5
ca_init = xa_init*csa_max
cc_init = xc_init*csc_max
Ua_init = uref_a( xa_init )
Uc_init = uref_c( xc_init )
### Mesh
N = 80
Ns = int(N/8.)
Na = int(N/3.)
Nc = N - Ns - Na
X = 165e-6 # [m]
Nra = 10
Nrc = 20
Ra = 10.0e-6
Rc = 6.00e-6
cell_coated_area = 1.0 # [m^2]
I_app = 10.0 # A
#i_app = I_app / cell_coated_area # current density, [A/m^2]
### Initial conditions
# E-lyte conc
c_init = 1000.0 # [mol/m^3]
c_centered = c_init*numpy.ones( N, dtype='d' )
# E-lyte potential
p_init = 0.0 # [V]
p_centered = p_init*numpy.ones( N, dtype='d' )
# Solid potential on anode and cathode
pa_init = 0.0 # [V]
pa_centered = pa_init*numpy.ones( Na, dtype='d' )
pc_init = 0.0 # [V]
pc_centered = pc_init*numpy.ones( Nc, dtype='d' )
# Solid conc on anode and cathode
ca_centered = ca_init*numpy.ones( Na*Nra, dtype='d' )
cc_centered = cc_init*numpy.ones( Nc*Nrc, dtype='d' )
num_diff_vars = len(c_centered)+len(ca_centered)+len(cc_centered)
num_algr_vars = len(p_centered)+len(pa_centered)+len(pc_centered)
#The initial conditions
y0 = numpy.concatenate( [c_centered, ca_centered, cc_centered, p_centered, pa_centered, pc_centered] ) #Initial conditions
yd0 = [0.0 for i in range(len(y0))] #Initial conditions
#Create an Assimulo implicit problem
imp_mod = MyProblem(Na,Ns,Nc,Nra,Nrc,X,Ra,Rc,cell_coated_area,bsp_dir,y0,yd0,'Example using an analytic Jacobian')
#Sets the options to the problem
imp_mod.algvar = [1.0 for i in range(num_diff_vars)] + [0.0 for i in range(num_algr_vars)] #Set the algebraic components
#Create an Assimulo implicit solver (IDA)
imp_sim = IDA(imp_mod) #Create a IDA solver
#Sets the parameters
imp_sim.atol = 1e-5 #Default 1e-6
imp_sim.rtol = 1e-5 #Default 1e-6
imp_sim.suppress_alg = True #Suppress the algebraic variables on the error test
### Simulate
imp_mod.set_iapp( I_app/10. )
imp_sim.make_consistent('IDA_YA_YDP_INIT')
ta, ya, yda = imp_sim.simulate(0.1,5)
imp_mod.set_iapp( I_app/2. )
imp_sim.make_consistent('IDA_YA_YDP_INIT')
tb, yb, ydb = imp_sim.simulate(0.2,5)
imp_mod.set_iapp( I_app )
imp_sim.make_consistent('IDA_YA_YDP_INIT')
# Sim step 1
t1, y1, yd1 = imp_sim.simulate(100,100)
#t1, y1, yd1 = imp_sim.simulate(1000,1000)
imp_mod.set_iapp( 0.0 )
imp_sim.make_consistent('IDA_YA_YDP_INIT')
# Sim step 2
t2, y2, yd2 = imp_sim.simulate(200,100)
print('Performing plots...')
# extract variables
im = imp_mod
ce_1 = y1[:,im.ce_inds]
ca_1 = y1[:,im.csa_inds]
cc_1 = y1[:,im.csc_inds]
# Parse out particles
#c_s_a_list1 = [ [] for it in range(len(t1)) ]
#c_s_c_list1 = [ [] for it in range(len(t1)) ]
#for it in range(len(t1)) :
# c_s_a_list1[it] = numpy.reshape( y1[it, imp_mod.csa_inds].T, (imp_mod.Na,imp_mod.Nra) ).T
# c_s_c_list1[it] = numpy.reshape( y1[it, imp_mod.csc_inds].T, (imp_mod.Nc,imp_mod.Nrc) ).T
pe_1 = y1[:,im.pe_inds]
pa_1 = y1[:,im.pa_inds]
pc_1 = y1[:,im.pc_inds]
ce_2 = y2[:,im.ce_inds]
ca_2 = y2[:,im.csa_inds]
cc_2 = y2[:,im.csc_inds]
# Parse out particles
#c_s_a_list2 = [ [] for it in range(len(t2)) ]
#c_s_c_list2 = [ [] for it in range(len(t2)) ]
#for it in range(len(t2)) :
# c_s_a_list2[it] = numpy.reshape( y2[it, imp_mod.csa_inds].T, (imp_mod.Na,imp_mod.Nra) ).T
# c_s_c_list2[it] = numpy.reshape( y2[it, imp_mod.csc_inds].T, (imp_mod.Nc,imp_mod.Nrc) ).T
pe_2 = y2[:,im.pe_inds]
pa_2 = y2[:,im.pa_inds]
pc_2 = y2[:,im.pc_inds]
#Plot
# t1
# Plot through space
f, ax = plt.subplots(2,5)
# ce vs x
ax[0,0].plot(imp_mod.x_m*1e6,ce_1.T)
# pe vs x
ax[0,1].plot(imp_mod.x_m*1e6,pe_1.T)
# pa vs x
ax[0,2].plot(imp_mod.x_m_a*1e6,pa_1.T)
# pc vs x
ax[0,2].plot(imp_mod.x_m_c*1e6,pc_1.T)
# ca vs x
#ax[0,3].plot(imp_mod.x_m_a*1e6,ca_1.T)
## cc vs x
#ax[0,3].plot(imp_mod.x_m_c*1e6,cc_1.T)
#for it in range(len(t1)) :
# #cs_a
# ax[0,3].plot( imp_mod.r_e_a, c_s_a_list1[it] )
# #cs_c
# ax[0,3].plot( imp_mod.r_e_a[-1]*1.2 + imp_mod.r_e_c, c_s_c_list1[it] )
ax[0,0].set_title('t1 c')
ax[0,0].set_xlabel('Cell Thickness [$\mu$m]')
ax[0,0].set_ylabel('E-lyte Conc. [mol/m$^3$]')
ax[0,1].set_title('t1 p')
ax[0,1].set_xlabel('Cell Thickness [$\mu$m]')
ax[0,1].set_ylabel('E-lyte Potential [V]')
ax[0,2].set_title('t1 p solid')
ax[0,2].set_xlabel('Cell Thickness [$\mu$m]')
ax[0,2].set_ylabel('Solid Potential [V]')
ax[0,3].set_title('t1 conc solid')
ax[0,3].set_xlabel('Cell Thickness [$\mu$m]')
ax[0,3].set_ylabel('Solid Conc. [mol/m$^3$]')
# t2
ax[1,0].plot(imp_mod.x_m*1e6,ce_2.T)
ax[1,1].plot(imp_mod.x_m*1e6,pe_2.T)
ax[1,2].plot(imp_mod.x_m_a*1e6,pa_2.T)
ax[1,2].plot(imp_mod.x_m_c*1e6,pc_2.T)
#ax[1,3].plot(imp_mod.x_m_a*1e6,ca_2.T)
##ax[1,3].plot(imp_mod.x_m_c*1e6,cc_2.T)
#for it in range(len(t2)) :
# #cs_a
# ax[1,3].plot( imp_mod.r_e_a, c_s_a_list2[it] )
# #cs_c
# ax[1,3].plot( imp_mod.r_e_a[-1]*1.2 + imp_mod.r_e_c, c_s_c_list2[it] )
ax[1,0].set_title('t2 c')
ax[1,0].set_xlabel('Cell Thickness [$\mu$m]')
ax[1,0].set_ylabel('E-lyte Conc. [mol/m$^3$]')
ax[1,1].set_title('t2 p e-lyte')
ax[1,1].set_xlabel('Cell Thickness [$\mu$m]')
ax[1,1].set_ylabel('E-lyte Potential [V]')
ax[1,2].set_title('t2 p solid')
ax[1,2].set_xlabel('Cell Thickness [$\mu$m]')
ax[1,2].set_ylabel('Solid Potential [V]')
ax[1,3].set_title('t2 Solid Conc.')
ax[1,3].set_xlabel('Cell Thickness [$\mu$m]')
ax[1,3].set_ylabel('Solid Conc. [mol/m$^3$]')
plt.tight_layout()
# Plot through time
f, ax = plt.subplots(1,4)
ax[0].plot(t1,ce_1)
ax[1].plot(t1,pe_1)
ax[2].plot(t1,pa_1)
ax[2].plot(t1,pc_1)
ax[3].plot(t1,ca_1)
ax[3].plot(t1,cc_1)
ax[0].plot(t2,ce_2)
ax[1].plot(t2,pe_2)
ax[2].plot(t2,pa_2)
ax[2].plot(t2,pc_2)
ax[3].plot(t2,ca_2)
ax[3].plot(t2,cc_2)
ax[0].set_ylabel('E-lyte Conc. [mol/m$^3$]')
ax[0].set_xlabel('Time [s]')
ax[1].set_ylabel('E-lyte Potential [V]')
ax[1].set_xlabel('Time [s]')
ax[2].set_ylabel('Solid Potential [V]')
ax[2].set_xlabel('Time [s]')
ax[3].set_ylabel('Solid Conc. [mol/m$^3$]')
ax[3].set_xlabel('Time [s]')
plt.tight_layout()
plt.figure()
plt.plot( t1, pc_1[:,-1] - pa_1[:,0] )
plt.plot( t2, pc_2[:,-1] - pa_2[:,0] )
plt.show()
#imp_mod = MyProblem(Na,Ns,Nc,Nra,Nrc,X,Ra,Rc,cell_coated_area,bsp_dir,y0,yd0,'Example using an analytic Jacobian')
## my own time solver
#delta_t = 1.0
#tf = 10.
#time = [ i*delta_t for i in range(int(tf/delta_t)+1) ]
#print time
#x_out = numpy.zeros( [num_diff_vars, len(time)] )
#z_out = numpy.zeros( [num_algr_vars, len(time)] )
#x_out[:,0] = numpy.concatenate( [c_centered, ca_centered, cc_centered] )
#z_out[:,0] = numpy.concatenate( [p_centered, pa_centered, pc_centered] )
#for it, t in enumerate(time[1:]) :
## if it == 0 :
## Cur_vec = [ 0.0, 0.0, 0.01*I_app ]
## elif it == 1 :
## Cur_vec = [ 0.0, 0.01*I_app, 0.1*I_app ]
## elif it == 2 :
## Cur_vec = [ 0.01*I_app, 0.1*I_app, 0.5*I_app ]
## elif it == 3 :
## Cur_vec = [ 0.1*I_app, 0.5*I_app, I_app ]
## elif it == 4 :
## Cur_vec = [ 0.5*I_app, I_app, I_app ]
## else :
## Cur_vec = [ I_app, I_app, I_app ]
# if it == 0 :
# Cur_vec = [ 0.0, 0.0, I_app ]
# elif it == 1 :
# Cur_vec = [ 0.0, I_app, I_app ]
# else :
# Cur_vec = [ I_app, I_app, I_app ]
#
# x_out[:,it+1], z_out[:,it+1], newtonStats = imp_mod.cn_solver( x_out[:,it], z_out[:,it], Cur_vec, delta_t )
## Parse out particles
#c_s_a_list = [ [] for it in range(len(time)) ]
#c_s_c_list = [ [] for it in range(len(time)) ]
#for it in range(len(time)) :
# c_s_a_list[it] = numpy.reshape( x_out[imp_mod.csa_inds, it], (imp_mod.Na,imp_mod.Nra) ).T
# c_s_c_list[it] = numpy.reshape( x_out[imp_mod.csc_inds, it], (imp_mod.Nc,imp_mod.Nrc) ).T
#plt.close()
#f, ax = plt.subplots(1,3)
## c_e
#ax[0].plot( imp_mod.x_m, x_out[:imp_mod.N,:-1] )
## phi_e
#ax[1].plot( imp_mod.x_m, z_out[:imp_mod.N,:-1] )
## phi_s
#ax[2].plot( imp_mod.x_m_a, z_out[-imp_mod.Na-imp_mod.Nc:-imp_mod.Nc,:-1] )
#ax[2].plot( imp_mod.x_m_c, z_out[-imp_mod.Nc:,:-1] )
#f2, ax2 = plt.subplots(1,2)
## cs_a
#for it in range(len(time)) :
# #cs_a
# ax2[0].plot( imp_mod.r_e_a, c_s_a_list[it] )
# #cs_c
# ax2[1].plot( imp_mod.r_e_c, c_s_c_list[it] )
#plt.show()
#print z_out
# def dae_system( self, x, z, Input, get_mats=0 ) :
# self.set_iapp( Input )
# y = numpy.concatenate([x,z])
# ## Parse out the states
# # E-lyte conc
# ce = y[ self.ce_inds]
# # Solid conc a:anode, c:cathode
# csa = y[ self.csa_inds]
# csc = y[ self.csc_inds]
# # E-lyte potential
# phi = y[self.pe_inds]
# # Solid potential
# phi_s_a = y[self.pa_inds]
# phi_s_c = y[self.pc_inds]
# ## Grab state dependent matrices
# # For E-lyte conc and potential (i.e., De(ce), kapp_e(ce))
# A_ce = self.build_Ace_mat( ce )
# A_pe = self.build_Ape_mat( ce )
# B_pe = self.build_Bpe_mat( ce )
# ja = self.j_a
# jc = self.j_c
# j = self.j
# ## Compute the residuals
# # Time deriv components
# r1 = ( ((A_ce.dot(ce)).flatten() + (self.B_ce.dot(j)).flatten()) ) # E-lyte conc
# r2 = ( (self.A_cs_a.dot(csa)).flatten() + (self.B_cs_a.dot(ja)).flatten() ) # Anode conc
# r3 = ( (self.A_cs_c.dot(csc)).flatten() + (self.B_cs_c.dot(jc)).flatten() ) # Cathode conc
# r4 = A_pe.dot(phi).flatten() - B_pe.dot(ce).flatten() + self.B2_pe.dot(j).flatten() # E-lyte potential
# r5 = self.A_ps_a.dot(phi_s_a).flatten() - self.B_ps_a.dot(ja).flatten() - self.B2_ps_a*self.i_app # Anode potential
# r6 = self.A_ps_c.dot(phi_s_c).flatten() - self.B_ps_c.dot(jc).flatten() + self.B2_ps_c*self.i_app # Cathode potential
# if get_mats :
# res_out = numpy.concatenate( [r1,r2,r3] ), numpy.concatenate( [r4, r5, r6] ), { 'A_ce':A_ce, 'A_pe':A_pe, 'B_pe':B_pe }
# else :
# res_out = numpy.concatenate( [r1,r2,r3] ), numpy.concatenate( [r4, r5, r6] )
# return res_out
# def jac_system( self, mats ) :
# A_ce = mats['A_ce'] #self.build_Ace_mat( ce )
# A_pe = mats['A_pe'] #self.build_Ape_mat( ce )
# B_pe = mats['B_pe'] #self.build_Bpe_mat( ce )
# ##
# fx = scipy.linalg.block_diag( A_ce, self.A_cs_a, self.A_cs_c )
# ##
# ##
# fz = numpy.zeros( [self.num_diff_vars, self.num_algr_vars] )
# ##
# gx = numpy.zeros( [self.num_algr_vars, self.num_diff_vars] )
# # phi_e vs ce
# gx[ self.pe_inds_r2, self.ce_inds_c ] = -B_pe
# ##
# ##
# # z vs z
# gz = scipy.linalg.block_diag( A_pe, self.A_ps_a, self.A_ps_c )
# return fx, fz, gx, gz
# def cn_solver( self, x, z, Cur_vec, delta_t ) :
# """
# Crank-Nicholson solver for marching through time
# """
# Cur_prev, Cur, Cur_nxt = Cur_vec[0], Cur_vec[1], Cur_vec[2]
# maxIters = 20
# tol = 1e-5
# Nx = self.num_diff_vars
# Nz = self.num_algr_vars
# x_nxt = numpy.zeros( (Nx,maxIters), dtype='d' )
# z_nxt = numpy.zeros( (Nz,maxIters), dtype='d' )
# relres = numpy.zeros( maxIters, dtype='d' )
# relres[0] = 1.0
# var_flag = {'lim_on':0}
# # Solve for consistent ICs
# if Cur != Cur_prev :
# z_cons = numpy.zeros( (Nz, maxIters), dtype='d' )
# z_cons[:,0] = deepcopy(z)
# junk_f, g, mats = self.dae_system( x, z, Cur, get_mats=1 )
# for idx in range(maxIters-1) :
# (junk_fx, junk_fz, junk_gx, g_z) = self.jac_system( mats )
# Delta_z = -sparseSolve( sparseMat(g_z), g )
# z_cons[:,idx+1] = z_cons[:,idx] + Delta_z
# relres_z = numpy.linalg.norm(Delta_z,numpy.inf) / numpy.linalg.norm(z,numpy.inf)
# if relres_z < tol :
# break
# elif idx == maxIters-1 :
# print(('Warning: Max Newton iterations reached for consistency | RelChange=',relres_z*100.0))
# z = z_cons[:,idx+1]
# #print Cur
# f, g = self.dae_system( deepcopy(x), deepcopy(z), Cur )
# x_nxt[:,0] = deepcopy(x)
# z_nxt[:,0] = deepcopy(z)
#
# # plt.figure(1)
# # plt.plot( x_nxt[:,0] )
# # plt.plot( z_nxt[:,0] )
# # plt.show()
# for idx in range(maxIters-1) :
# f_nxt, g_nxt, mats = self.dae_system( x_nxt[:,idx], z_nxt[:,idx], Cur_nxt, get_mats=1 )
## print 'x:',x.shape
## print 'xnxt:',x_nxt[:,idx].shape
## print 'f:',f.shape
## print 'fnxt:',f_nxt.shape
## print 'z:', z.shape
## print 'g:', g.shape
## print 'znxt:', z_nxt[:,idx].shape
## print 'gnxt:', g_nxt.shape
# F1 = x - x_nxt[:,idx] + delta_t/2.*( f+f_nxt )
# F2 = g_nxt
# F = numpy.concatenate( (F1, F2), axis=0 )
# fx, fz, gx, gz = self.jac_system( mats )
# F1_x = -sparse.eye(len(x)) + delta_t/2. * fx
# F1_z = delta_t/2. * fz
# F2_x = gx
# F2_z = gz
# J = numpy.concatenate( (numpy.concatenate( (F1_x, F1_z), axis=1 ),
# numpy.concatenate( (F2_x, F2_z), axis=1 )) )
# Jsp = sparseMat( J )
# Delta_y = -sparseSolve( Jsp, F )
# x_nxt[:,idx+1] = x_nxt[:,idx] + Delta_y[:Nx]
# z_nxt[:,idx+1] = z_nxt[:,idx] + Delta_y[Nx:]
# # plt.figure(1)
# # plt.plot(Delta_y)
# # plt.figure(2)
# # plt.plot(x_nxt[:,idx])
# # plt.plot(x_nxt[:,idx+1])
#
## plt.show()
# y = numpy.concatenate( (x_nxt[:,idx+1], z_nxt[:,idx+1]), axis=0 )
# relres[idx+1] = numpy.linalg.norm( Delta_y, numpy.inf ) / numpy.linalg.norm( y, numpy.inf )
# if (relres[idx+1]<tol) and (numpy.linalg.norm(F, numpy.inf)<tol) :
# break
# elif idx==maxIters-1 :
# print( ('Warning: Max Newton iterations reached in main CN loop | RelChange = ',relres[-1]*100.0) )
# x_nxtf = x_nxt[:,idx+1]
# z_nxtf = z_nxt[:,idx+1]
# newtonStats = {'var_flag':var_flag}
# newtonStats['iters'] = idx
# newtonStats['relres'] = relres
# return x_nxtf, z_nxtf, newtonStats
|
matthewpklein/battsimpy
|
tests/dae_genPart_noJ.py
|
Python
|
gpl-3.0
| 37,032
|
[
"Gaussian"
] |
9405810524de695c4fe10c5bf2e2703335cae5f31747af4a74a2aeba6d59936c
|
# proxy module
from __future__ import absolute_import
from mayavi.sources.three_ds_importer import *
|
enthought/etsproxy
|
enthought/mayavi/sources/three_ds_importer.py
|
Python
|
bsd-3-clause
| 101
|
[
"Mayavi"
] |
362aa13c6ea925472f525b21d968fba4fbaadec428d74d39ec98e82b9e40820a
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2022, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
#"""
#PSLibrary Job File
######################
#Job files (extension '.job') are used inside the `pslibrary`_ package to house
#input data for pseudopotential generation using the atomic sub-package ('ld1.x')
#within the `Quantum ESPRESSO`_ quantum chemistry suite of tools. See for example,
#`this`_ job file.
#
#.. _pslibrary: https://github.com/dalcorso/pslibrary
#.. _Quantum ESPRESSO: https://github.com/QEF/q-e
#.. _this: https://github.com/dalcorso/pslibrary/blob/master/paw_ps_collection.job
#"""
#import re
#import numpy as np
#import pandas as pd
#from exatomic.exa import isotopes, Sections, Parser, TypedProperty, DataFrame
#
#
#class Element(Parser):
# """A single element's input file in the composite job file."""
# _key_config = "config"
# _key_ae_dct = {}
# _key_mrk = "["
# _key_resplit = re.compile("([1-9]*)([spdfghjklmn])([0-9-.]*)")
# _key_symbol = "title"
# _key_zed = "zed"
# _key_ps = "/"
# _key_ps_cols = ("n", "l_sym", "nps", "l", "occupation",
# "energy", "rcut_nc", "rcut", "misc")
# _key_ps_dtypes = [np.int64, "O", np.int64, np.int64, np.float64,
# np.float64, np.float64, np.float64, np.float64]
# ae = TypedProperty(DataFrame)
# ps = TypedProperty(DataFrame)
# z = TypedProperty(int)
# symbol = TypedProperty(str)
#
# def _parse(self):
# if str(self[0]).startswith("#"):
# return
# found = self.find(self._key_config, self._key_symbol,
# self._key_zed, self._key_ps)
# config = found[self._key_config][-1][1].split("=")[1]
# config = config.replace("'", "").replace(",", "").split(" ")
# nvals = []
# angmoms = []
# occs = []
# for item in config:
# if "[" in item:
# continue
# try:
# nval, angmom, occ = self._key_resplit.match(item.lower()).groups()
# nvals.append(nval)
# angmoms.append(angmom)
# occs.append(occ)
# except AttributeError:
# pass
# self.ae = pd.DataFrame.from_dict({'n': nvals, 'l': angmoms, 'occupation': occs})
# self.symbol = found[self._key_symbol][-1][1].split("=")[1].replace("'", "").replace(",", "").title()
# element = getattr(isotopes, self.symbol)
# self.z = element.Z
# ps = []
# for line in self[found[self._key_ps][-1][0]:]:
# if "#" in line:
# continue
# ls = line.split()
# if len(ls) > 7:
# dat = list(self._key_resplit.match(ls[0].lower()).groups())[:-1]
# dat += ls[1:]
# ps.append(dat)
# self.ps = pd.DataFrame(ps, columns=self._key_ps_cols)
# for i, col in enumerate(self.ps.columns):
# self.ps[col] = self.ps[col].astype(self._key_ps_dtypes[i])
#
#
#class PSLJobFile(Sections):
# """Input 'job' file in the pslibrary"""
# name = "pslibrary job file"
# description = "Parser for pslibrary input files"
# _key_sep = "EOF"
# _key_parser = Element
#
# def _parse(self):
# """Parse input data from pslibrary"""
# delims = self.find(self._key_sep, text=False)[self._key_sep]
# starts = delims[::2]
# ends = delims[1::2]
# names = [self._key_parser]*len(ends)
# self._sections_helper(parser=names, start=starts, end=ends)
|
exa-analytics/exatomic
|
exatomic/qe/psp/jobfile.py
|
Python
|
apache-2.0
| 3,549
|
[
"ESPResSo",
"Quantum ESPRESSO"
] |
ac7c2213ad40c046889eb8762ff1bb7049846b31c95df424b3e12da20498a086
|
#!/usr/bin/env python3
import sys
try:
import netCDF4 as netCDF
except ImportError:
print("netCDF4 is not installed!")
sys.exit(1)
class PISMDataset(netCDF.Dataset):
def create_time(self, use_bounds=False, length=None, units=None):
self.createDimension('time', size=length)
t_var = self.createVariable('time', 'f8', ('time',))
t_var.axis = "T"
t_var.long_name = "time"
if not units:
t_var.units = "seconds since 1-1-1" # just a default
else:
t_var.units = units
if use_bounds:
self.createDimension('n_bounds', 2)
self.createVariable("time_bounds", 'f8', ('time', 'n_bounds'))
t_var.bounds = "time_bounds"
def create_dimensions(self, x, y, time_dependent=False, use_time_bounds=False):
"""
Create PISM-compatible dimensions in a NetCDF file.
"""
if time_dependent and 'time' not in self.variables:
self.create_time(use_time_bounds)
self.createDimension('x', x.size)
self.createDimension('y', y.size)
x_var = self.createVariable('x', 'f8', ('x',))
x_var[:] = x
y_var = self.createVariable('y', 'f8', ('y',))
y_var[:] = y
x_var.axis = "X"
x_var.long_name = "X-coordinate in Cartesian system"
x_var.units = "m"
x_var.standard_name = "projection_x_coordinate"
y_var.axis = "Y"
y_var.long_name = "Y-coordinate in Cartesian system"
y_var.units = "m"
y_var.standard_name = "projection_y_coordinate"
self.sync()
def append_time(self, value, bounds=None):
if 'time' in self.dimensions:
time = self.variables['time']
N = time.size
time[N] = value
if bounds:
self.variables['time_bounds'][N, :] = bounds
def set_attrs(self, var_name, attrs):
"""attrs should be a list of (name, value) tuples."""
if not attrs:
return
for (name, value) in attrs.items():
if name == "_FillValue":
continue
setattr(self.variables[var_name], name, value)
def define_2d_field(self, var_name, time_dependent=False, dims=None, nc_type='f8', attrs=None):
"""
time_dependent: boolean
dims: an optional list of dimension names. use this to override the
default order ('time', 'y', 'x')
attrs: a dictionary of attributes
"""
if not dims:
if time_dependent:
dims = ('time', 'y', 'x')
else:
dims = ('y', 'x')
try:
var = self.variables[var_name]
except KeyError:
if attrs is not None and '_FillValue' in attrs:
var = self.createVariable(var_name, nc_type, dims,
fill_value=attrs['_FillValue'])
else:
var = self.createVariable(var_name, nc_type, dims)
self.set_attrs(var_name, attrs)
return var
def define_timeseries(self, var_name, attrs=None):
try:
if attrs is not None and '_FillValue' in attrs:
var = self.createVariable(var_name, 'f8', ('time',),
fill_value=attrs['_FillValue'])
else:
var = self.createVariable(var_name, 'f8', ('time',))
except:
var = self.variables[var_name]
self.set_attrs(var_name, attrs)
return var
def write(self, var_name, data, time_dependent=False, attrs=None):
"""
Write time-series or a 2D field to a file.
"""
if data.ndim == 1:
return self.write_timeseries(var_name, data, attrs=attrs)
elif data.ndim == 2:
return self.write_2d_field(var_name, data, time_dependent, attrs=attrs)
else:
return None
def write_2d_field(self, var_name, data, time_dependent=False, attrs=None):
"""
Write a 2D numpy array to a file in a format PISM can read.
"""
var = self.define_2d_field(var_name, time_dependent, attrs=attrs)
if time_dependent:
last_record = self.variables['time'].size - 1
var[last_record, :, :] = data
else:
var[:] = data
return var
def write_timeseries(self, var_name, data, attrs=None):
"""Write a 1D (time-series) array to a file."""
var = self.define_timeseries(var_name, attrs=attrs)
var[:] = data
return var
if __name__ == "__main__":
# produce a NetCDF file for testing
from numpy import linspace, meshgrid
nc = PISMDataset("foo.nc", 'w')
x = linspace(-100, 100, 101)
y = linspace(-100, 100, 201)
xx, yy = meshgrid(x, y)
nc.create_dimensions(x, y, time_dependent=True, use_time_bounds=True)
nc.define_2d_field("xx", time_dependent=True,
attrs={"long_name": "xx",
"comment": "test variable",
"valid_range": (-200.0, 200.0)})
for t in [0, 1, 2, 3]:
nc.append_time(t, (t - 1, t))
nc.write("xx", xx + t, time_dependent=True)
nc.write("yy", yy + 2 * t, time_dependent=True)
nc.close()
|
pism/pism
|
examples/preprocessing/PISMNC.py
|
Python
|
gpl-3.0
| 5,381
|
[
"NetCDF"
] |
ccdf2640633efc9270873552962b267def0a14ec51fddb84cf74fdc4ba0bc30c
|
# Copyright 2016 Curtis Sand
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library of Audio Signal Generators for making digital noises."""
import math
import cmath
import numpy
from scipy import fftpack
from .common import defaults
from .waveform import Waveform
from .effects import normalize
class Generator(object):
"""A Basic Signal Generator.
Each signal produced is generated independently of previously generated
signals.
"""
def __init__(self, length=None, framerate=None, verbose=False,
fade_percentage=None):
self.length = length
if not length:
self.length = defaults.length
self.framerate = framerate
if not framerate:
self.framerate = defaults.framerate
self.verbose = verbose
self.fade_percentage = fade_percentage if fade_percentage else 0.02
def _init(self, length=None, framerate=None, verbose=None, **kwargs):
if length:
self.length = length
if framerate:
self.framerate = framerate
if verbose:
self.verbose = verbose
# framecount = frames / sec * sec
self.framecount = int(self.framerate * self.length)
# rectify length to actual framecount
self.length = float(self.framecount) / self.framerate
self.dprint('generating %s frames' % self.framecount)
self.wavedata = numpy.zeros(self.framecount)
if 'phase' in kwargs:
self.phase = kwargs['phase']
else:
self.phase = numpy.random.random() * 2 * math.pi
@property
def waveform(self):
return Waveform(self.wavedata, self.framerate)
def dprint(self, msg):
"""Conditionally print a debugging message."""
if self.verbose:
print(msg)
def whitenoise(self, *args, **kwargs):
"""Random Gaussian White Noise."""
self._init(*args, **kwargs)
self.wavedata = numpy.random.randn(self.framecount)
return self.wavedata
def _sinusoid_angle(self, frame, frequency):
"""Calculate the sinusoid angle for a given frame and frequency."""
return 2 * math.pi * frequency * frame / self.framerate
def _sinusoid_value(self, frame, frequency):
"""Calculate the value of a sinusoid wave at a given frequency."""
return math.sin(self.phase + self._sinusoid_angle(frame, frequency))
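# Illustrative check (sketch): at frequency f and framerate fs the
# angle advances by 2*pi*f/fs per frame, so one full cycle takes fs/f
# frames (e.g. 44100/440 ~= 100.2 frames for concert A at CD rate).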
def sin_constant(self, frequency, *args, **kwargs):
"""Sinusoid wave of constant frequency."""
self._init(*args, **kwargs)
frequency = float(frequency)
fade_frames = self.fade_percentage * self.framecount
fade_point = self.framecount - fade_frames
for frame in range(self.framecount):
value = self._sinusoid_value(frame, frequency)
if frame > fade_point: # fade the end of the note
_old = value
value *= 1 - (frame - fade_point) / fade_frames
if frame % 50 == 0:
self.dprint('fade from %s to %s' % (_old, value))
if frame < fade_frames:
_old = value
value *= frame / fade_frames
self.wavedata[frame] = value
return self.waveform
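# Illustrative usage (hedged sketch; assumes defaults supplies sane
# length/framerate values):
# gen = Generator(length=0.5, framerate=8000)
# tone = gen.sin_constant(440.0)  # 0.5 s of 440 Hz with faded ends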
def sin_linear(self, start_freq, end_freq, *args, **kwargs):
"""Sinusoid wave of linearly changing frequency."""
self._init(*args, **kwargs)
for frame in range(len(self.wavedata)):
frequency = start_freq + frame * (
float(end_freq - start_freq) / self.framecount)
value = self._sinusoid_value(frame, frequency)
self.wavedata[frame] = value
return self.waveform
class FFTGenerator(Generator):
'''Use an Inverse Fourier Transform to create a multifrequency sinusoid.
The generated sinusoid is a single waveform comprised of multiple
frequencies that were not generated as their own fundamental waveforms
first.
'''
def __init__(self, length=None, framerate=None, verbose=False):
self.approx_desired_precision = 10 # Hz
self.length = length
if not length:
self.length = defaults.length
self.framerate = framerate
if not framerate:
self.framerate = defaults.framerate
self.verbose = verbose
@property
def window_size(self):
"""Length of a window size as determined by the desired precision."""
return int(self.framerate / 2 / self.approx_desired_precision)
@property
def new_window(self):
"""Create a new empty array for a window."""
return numpy.zeros(self.window_size)
@property
def frequencies(self):
"""The frequencies mapped to bins in the frequency domain."""
return fftpack.fftfreq(self.window_size, 1.0 / self.framerate)
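# Note (sketch): with window_size = framerate/(2*precision), the bin
# spacing returned by fftpack.fftfreq is framerate/window_size, i.e.
# about twice the requested precision, so every requested frequency
# lies within roughly approx_desired_precision Hz of some bin.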
def _get_frequency_bin(self, requested_freq):
"""Find the FFT bin corresponding closest to requested frequency."""
diff = 20000 # Need large value, 20KHz is high enough
closest_index = None
for index, freq in enumerate(self.frequencies):
if freq < 0:
continue
tmp_diff = abs(requested_freq - freq)
if tmp_diff < diff:
diff = tmp_diff
closest_index = index
self.dprint('using bin %s for freq %s' %
(closest_index, requested_freq))
return closest_index
def generate(self, frequencies, **kwargs):
"""Generate the requested waveform."""
super(FFTGenerator, self)._init(**kwargs)
wavedata = Waveform(numpy.zeros(int(self.framerate * self.length)))
freq_domain_stub = self.new_window
for frequency in frequencies:
ifft_bin = self._get_frequency_bin(frequency)
freq_domain_stub[ifft_bin] = self.framerate / len(frequencies)
window = Waveform(normalize(numpy.real(
fftpack.ifft(freq_domain_stub))))
for count, frame in enumerate(range(0, self.framecount,
self.window_size)):
wavedata = wavedata.insert(frame, window)
self.dprint('{} generated {}, {} length windows'.format(
self.__class__.__name__, count, self.window_size))
self.wavedata = wavedata
return wavedata
class ContinuousGenerator(Generator):
"""Generate and accumulate a continuous signal accross multiple notes.
For the description below,
LengthA="lenth_of_note - (1/2 * transition_length)"
and
LengthB="length_of_note - transition_length".
For the first note generate a constant note of length LengthA.
For subsequent notes generate a linear signal from the freq. of the first
note to that of the second of the set transition length, followed by a
constant note of length LengthB.
If the keyword "end=True" is used then the constant portion of the length
should only be LengthA. ::
| | | |
{------}\ | LengthB | LengthA |
| \| | |
| LengthA |\ |/{----------}
| | \{-------}/| |
Note: Due to the fact that this generator currently ignores the phase of
the sinusoid being generated, when the frequency is modulated during a
transition period there are some unfortunate problems. When the frequency
of the waveform shifts we need to compensate by adding a phase shift so
that we are using the same phase angle of a sinusoid of a new frequency.
Since we ignore phase though, both frequency and phase shift and we end up
seeing an antialiased waveform of an apparent frequency much higher than
either of the two frequencies being transitioned between.
TODO: See if I can figure out the appropriate phase calculation to anchor
the phase angle of the sinusoid as the frequency shifts during the
transition period.
"""
def __init__(self, length=None, framerate=None, verbose=False):
super(ContinuousGenerator, self).__init__(length, framerate, verbose)
self.phase = 0 # don't do any random phase shifting
self.frequency = 0.001 # avoid divide by zero
self.end = False
self.start = True
self.last_frame = 0
self.wavedata = numpy.zeros(1)
self.transition_length = int(self.framerate * 0.1)
if self.transition_length % 2 != 0: # need even length transition
self.transition_length += 1
@property
def _constant_length(self):
adjustment = self.transition_length
if len(self.wavedata) <= 1 or self.end:
adjustment /= 2
return int(self.framecount - adjustment)
def _init(self, frequency=None, length=None, verbose=None, end=None,
**kwargs):
if frequency:
self.last_frequency = self.frequency
self.frequency = frequency
if length:
self.length = length
if verbose:
self.verbose = verbose
if end:
self.end = end
# framecount = frames / sec * sec
self.framecount = int(self.framerate * self.length)
# rectify length to actual framecount
self.length = float(self.framecount) / self.framerate
def _prep_wavedata(self, transition=False):
# save the frame where the next note starts
adjustment = self.transition_length
if not transition:
adjustment = self._constant_length
new_block = numpy.zeros(adjustment)
self.wavedata = numpy.concatenate((self.wavedata, new_block))
def _constant(self):
"""Append sinusoid wave of constant frequency to wavedata."""
self.dprint('constant freq from frame %s to %s' %
(self.last_frame,
self.last_frame + self._constant_length))
frequency = float(self.frequency)
for frame in range(self.last_frame,
self.last_frame + self._constant_length):
value = self._sinusoid_value(frame, frequency)
self.dprint('const frame %s at value %s at %s' % (
frame, value, frequency))
self.wavedata[frame] = value
self.last_frame = frame
def _transition(self):
"""Append sinusoid wave of linearly changing frequency to wavedata."""
self.dprint('transition from frame %s to %s' %
(self.last_frame,
self.last_frame + self.transition_length))
for frame in range(self.last_frame,
self.last_frame + self.transition_length):
modifier = ((frame - self.last_frame) *
float(self.frequency - self.last_frequency) /
self.transition_length)
frequency = (self.last_frequency + modifier)
value = self._sinusoid_value(frame, frequency)
self.dprint('transition frame %s at %s value %s: mod %s' % (
frame, frequency, value, modifier))
self.wavedata[frame] = value
self.last_frame = frame
def generate(self, frequency, length, end=False, *args, **kwargs):
self._init(frequency=frequency, length=length, end=end,
*args, **kwargs)
self.dprint('generating %s new frames at %s' % (self.framecount,
frequency))
if self.start:
self.dprint('Starting initial frequency in the signal...')
self._prep_wavedata()
self._constant()
self.dprint('signal is now %s long' % len(self.wavedata))
self.start = False
else:
self.dprint('Adding transition... %s to %s' % (
self.last_frequency, self.frequency))
self._prep_wavedata(transition=True)
self._transition()
self.dprint(' transition now %s long' % len(self.wavedata))
self.dprint('Adding the %s note...' % self.frequency)
self._prep_wavedata()
self._constant()
self.dprint('signal is now %s long' % len(self.wavedata))
class PhasorGenerator(object):
"""Generate a sinusoid by simulating a phasor in the imaginary plane.
By creating an imaginary number using python's polar coordinates methods we
can simulate a phasor vector in the imaginary plane. By rotating the vector
around the origin and taking the projection of the vector onto the real
axis we can generate a sinusoid.
# Anchoring Phase while stepping frequency
# get value for first frame at first frequency
phasor = cmath.rect(amplitude, 2 * math.pi * frame * frequency / framerate)
phasor_argument = cmath.atan(phasor.imag / phasor.real)
sinusoid_value = phasor.real
# get phase for second frequency
tmp_phasor = cmath.rect(
amplitude, 2 * math.pi * frame * new_frequency / framerate)
tmp_phase_arg = cmath.atan(tmp_phasor.imag / tmp_phasor.real)
phase_correction = phasor_argument.real - tmp_phase_arg.real
# check phase correction
test_phasor = cmath.rect(
amplitude, (2 * math.pi * frame * new_frequency /
framerate) + phase_correction)
if sinusoid_value != test_phasor.real:
# recalculate test_phasor with "phase_correction += math.pi"
if new_test_phasor.real != sinusoid_value:
raise Exception("something is wrong")
# get value for next frame at new frequency
new_phasor = cmath.rect(
amplitude, (2 * math.pi * (frame + 1) * new_frequency /
framerate) + phase_correction)
"""
def __init__(self, length=None, framerate=None, verbose=False):
self.length = length
if not length:
self.length = defaults.length
self.framerate = framerate
if not framerate:
self.framerate = defaults.framerate
self.verbose = verbose
self.wavedata = numpy.zeros(1)
self.last_frame = -1
self.last_phase = 0
self.last_frequency = 0
self.frequency = 0
self.phase = 0
self.amplitude = 1 # assumed to be constant for now
self.cmp_precision = 1e-05
@property
def waveform(self):
return Waveform(self.wavedata, self.framerate)
def _prep_wavedata(self):
new_block = numpy.zeros(self.framecount + 1)
self.wavedata = numpy.concatenate((self.wavedata, new_block))
def dprint(self, msg):
"""Conditionally print a debugging message."""
if self.verbose:
print(msg)
def _time(self, frame):
"""Convert wavedata frame units to seconds.
Unit Conversion:
seconds = frame / (frame / second) = second * frame / frame
"""
return float(frame) / float(self.framerate)
def _angle(self, frame, frequency, phase):
"""Calculate sinusoid angle in radians."""
return 2 * math.pi * frequency * self._time(frame) + phase
def _phasor(self, frame, frequency, phase):
"""Generate a phasor in the imaginary plane for the given point."""
return cmath.rect(self.amplitude,
self._angle(frame, frequency, phase))
def _phasor_argument(self, phasor):
"""Calculate the phasor argument.
The phasor argument can be used to calculate the appropriate phase
correction when transitioning between frequencies.
"""
return cmath.atan(phasor.imag / phasor.real).real
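# Note (sketch): cmath.atan(imag/real) only recovers the phase modulo
# pi (it cannot distinguish quadrants), which is why the phase
# correction computed below may need an extra half period added before
# the corrected phasor matches the last generated sample.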
def _calculate_phase_correction(self):
"""Calculate a new phase correction value for the new frequency."""
self.dprint('Calculating frequency phase correction...')
self.dprint(' Looking for sinusoid value %s' %
self.wavedata[self.last_frame])
# phasor for new frequency at the last frame
new_phasor = self._phasor(self.last_frame, self.frequency, 0)
new_phasor_arg = self._phasor_argument(new_phasor)
phase_correction = self.last_phase - new_phasor_arg
corrected_phasor = self._phasor(self.last_frame, self.frequency,
phase_correction)
self.dprint(' First try at correction: %s' %
corrected_phasor.real)
# Check whether we have the correct solution or if we need another half
# period for the phase correction to match up
if not numpy.isclose(self.wavedata[self.last_frame],
corrected_phasor.real,
rtol=self.cmp_precision):
self.dprint(' Not close enough, adding 1/2 a period.')
phase_correction += math.pi
corrected_phasor = self._phasor(self.last_frame, self.frequency,
phase_correction)
self.dprint(' New correction: %s' % corrected_phasor.real)
if not numpy.isclose(self.wavedata[self.last_frame],
corrected_phasor.real,
rtol=self.cmp_precision):
raise Exception('Something is wrong, the correction does not '
'match up.')
self.phase = phase_correction
self.dprint(' New phase correction for freq %s set to %s' %
(self.frequency, self.phase))
def _generate(self):
"""Continue generating the sinusoid at the current frequency."""
self._prep_wavedata()
for frame in range(self.last_frame + 1,
self.last_frame + self.framecount + 1):
phasor = self._phasor(frame, self.frequency, self.phase)
self.wavedata[frame] = phasor.real
self.last_frame = frame
self.last_phase = self._phasor_argument(phasor)
self.last_frequency = self.frequency
def generate(self, frequency, length=None):
"""Generate a new note and append it to the wavedata container."""
self.frequency = frequency
if length:
self.length = length
# framecount = frames / sec * sec
self.framecount = int(self.framerate * self.length)
# rectify length to actual framecount
self.length = float(self.framecount) / self.framerate
if (self.frequency != self.last_frequency
and not len(self.wavedata) <= 1):
self._calculate_phase_correction()
self._generate()
|
fretboardfreak/potty_oh
|
potty_oh/signal_generator.py
|
Python
|
apache-2.0
| 19,082
|
[
"Gaussian"
] |
fa386c29865352546d4afed7bf9a6f99372a4fcc10857a898233a03efbcef3ec
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for edit widgets.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Matthias Kuhn'
__date__ = '28/11/2015'
__copyright__ = 'Copyright 2015, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
import os
from qgis.core import (
QgsFeature,
QgsVectorLayer,
QgsProject,
QgsRelation,
QgsTransaction,
QgsFeatureRequest,
QgsVectorLayerTools
)
from qgis.gui import (
QgsGui,
QgsRelationWidgetWrapper,
QgsAttributeEditorContext,
QgsMapCanvas
)
from qgis.PyQt.QtCore import QTimer
from qgis.PyQt.QtWidgets import (
QToolButton,
QMessageBox,
QDialogButtonBox,
QTableView,
QApplication
)
from qgis.testing import start_app, unittest
start_app()
class TestQgsRelationEditWidget(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""
Setup the involved layers and relations for a n:m relation
:return:
"""
cls.mapCanvas = QgsMapCanvas()
QgsGui.editorWidgetRegistry().initEditors(cls.mapCanvas)
cls.dbconn = 'service=\'qgis_test\''
if 'QGIS_PGTEST_DB' in os.environ:
cls.dbconn = os.environ['QGIS_PGTEST_DB']
# Create test layer
cls.vl_b = QgsVectorLayer(cls.dbconn + ' sslmode=disable key=\'pk\' table="qgis_test"."books" sql=', 'books', 'postgres')
cls.vl_a = QgsVectorLayer(cls.dbconn + ' sslmode=disable key=\'pk\' table="qgis_test"."authors" sql=', 'authors', 'postgres')
cls.vl_link = QgsVectorLayer(cls.dbconn + ' sslmode=disable key=\'pk\' table="qgis_test"."books_authors" sql=', 'books_authors', 'postgres')
QgsProject.instance().addMapLayer(cls.vl_b)
QgsProject.instance().addMapLayer(cls.vl_a)
QgsProject.instance().addMapLayer(cls.vl_link)
cls.relMgr = QgsProject.instance().relationManager()
cls.rel_a = QgsRelation()
cls.rel_a.setReferencingLayer(cls.vl_link.id())
cls.rel_a.setReferencedLayer(cls.vl_a.id())
cls.rel_a.addFieldPair('fk_author', 'pk')
cls.rel_a.setId('rel_a')
assert(cls.rel_a.isValid())
cls.relMgr.addRelation(cls.rel_a)
cls.rel_b = QgsRelation()
cls.rel_b.setReferencingLayer(cls.vl_link.id())
cls.rel_b.setReferencedLayer(cls.vl_b.id())
cls.rel_b.addFieldPair('fk_book', 'pk')
cls.rel_b.setId('rel_b')
assert(cls.rel_b.isValid())
cls.relMgr.addRelation(cls.rel_b)
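# Note: rel_a and rel_b together model the n:m books<->authors
# relationship through the books_authors link table; each one-to-many
# relation points from a foreign key on the link layer back to its
# referenced parent layer.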
# Our mock QgsVectorLayerTools, that allow injecting data where user input is expected
cls.vltools = VlTools()
assert(cls.vl_a.isValid())
assert(cls.vl_b.isValid())
assert(cls.vl_link.isValid())
def setUp(self):
self.startTransaction()
def tearDown(self):
self.rollbackTransaction()
del self.transaction
def test_delete_feature(self):
"""
Check if a feature can be deleted properly
"""
self.createWrapper(self.vl_a, '"name"=\'Erich Gamma\'')
self.assertEqual(self.table_view.model().rowCount(), 1)
self.assertEqual(1, len([f for f in self.vl_b.getFeatures()]))
fid = next(self.vl_b.getFeatures(QgsFeatureRequest().setFilterExpression('"name"=\'Design Patterns. Elements of Reusable Object-Oriented Software\''))).id()
self.widget.featureSelectionManager().select([fid])
btn = self.widget.findChild(QToolButton, 'mDeleteFeatureButton')
def clickOk():
# Click the "Delete features" button on the confirmation message
# box
widget = self.widget.findChild(QMessageBox)
buttonBox = widget.findChild(QDialogButtonBox)
deleteButton = next((b for b in buttonBox.buttons() if buttonBox.buttonRole(b) == QDialogButtonBox.AcceptRole))
deleteButton.click()
QTimer.singleShot(1, clickOk)
btn.click()
# This is the important check that the feature is deleted
self.assertEqual(0, len([f for f in self.vl_b.getFeatures()]))
# This mostly checks that the database ON DELETE action is properly set on the relation
self.assertEqual(0, len([f for f in self.vl_link.getFeatures()]))
self.assertEqual(self.table_view.model().rowCount(), 0)
def test_list(self):
"""
Simple check if several related items are shown
"""
wrapper = self.createWrapper(self.vl_b) # NOQA
self.assertEqual(self.table_view.model().rowCount(), 4)
@unittest.skipIf(os.environ.get('QT_VERSION', '5') == '4' and os.environ.get('TRAVIS_OS_NAME', '') == 'linux', 'Fails on Qt4 + Travis Linux')  # unittest.expectedFailure accepts no condition, so a conditional skip is the closest idiomatic equivalent. It's probably not related to these variables at all, but that's the closest we can get to the real source of this problem at the moment...
def test_add_feature(self):
"""
Check if a new related feature is added
"""
self.createWrapper(self.vl_a, '"name"=\'Douglas Adams\'')
self.assertEqual(self.table_view.model().rowCount(), 0)
self.vltools.setValues([None, 'The Hitchhiker\'s Guide to the Galaxy'])
btn = self.widget.findChild(QToolButton, 'mAddFeatureButton')
btn.click()
# Book entry has been created
self.assertEqual(2, len([f for f in self.vl_b.getFeatures()]))
# Link entry has been created
self.assertEqual(5, len([f for f in self.vl_link.getFeatures()]))
self.assertEqual(self.table_view.model().rowCount(), 1)
def test_link_feature(self):
"""
Check if an existing feature can be linked
"""
wrapper = self.createWrapper(self.vl_a, '"name"=\'Douglas Adams\'') # NOQA
f = QgsFeature(self.vl_b.fields())
f.setAttributes([self.vl_b.dataProvider().defaultValueClause(0), 'The Hitchhiker\'s Guide to the Galaxy'])
self.vl_b.addFeature(f)
def choose_linked_feature():
dlg = QApplication.activeModalWidget()
dlg.setSelectedFeatures([f.id()])
dlg.accept()
btn = self.widget.findChild(QToolButton, 'mLinkFeatureButton')
timer = QTimer()
timer.setSingleShot(True)
timer.setInterval(0) # will run in the event loop as soon as it's processed when the dialog is opened
timer.timeout.connect(choose_linked_feature)
timer.start()
btn.click()
# magically the above code selects the feature here...
link_feature = next(self.vl_link.getFeatures(QgsFeatureRequest().setFilterExpression('"fk_book"={}'.format(f[0]))))
self.assertIsNotNone(link_feature[0])
self.assertEqual(self.table_view.model().rowCount(), 1)
def test_unlink_feature(self):
"""
Check if a linked feature can be unlinked
"""
wrapper = self.createWrapper(self.vl_b) # NOQA
# All authors are listed
self.assertEqual(self.table_view.model().rowCount(), 4)
it = self.vl_a.getFeatures(
QgsFeatureRequest().setFilterExpression('"name" IN (\'Richard Helm\', \'Ralph Johnson\')'))
self.widget.featureSelectionManager().select([f.id() for f in it])
self.assertEqual(2, self.widget.featureSelectionManager().selectedFeatureCount())
btn = self.widget.findChild(QToolButton, 'mUnlinkFeatureButton')
btn.click()
# This mostly checks that the database ON DELETE action is properly set on the relation
self.assertEqual(2, len([f for f in self.vl_link.getFeatures()]))
self.assertEqual(2, self.table_view.model().rowCount())
def test_discover_relations(self):
"""
Test the automatic discovery of relations
"""
relations = self.relMgr.discoverRelations([], [self.vl_a, self.vl_b, self.vl_link])
relations = {r.name(): r for r in relations}
self.assertEqual({'books_authors_fk_book_fkey', 'books_authors_fk_author_fkey'}, set(relations.keys()))
ba2b = relations['books_authors_fk_book_fkey']
self.assertTrue(ba2b.isValid())
self.assertEqual('books_authors', ba2b.referencingLayer().name())
self.assertEqual('books', ba2b.referencedLayer().name())
self.assertEqual([0], ba2b.referencingFields())
self.assertEqual([0], ba2b.referencedFields())
ba2a = relations['books_authors_fk_author_fkey']
self.assertTrue(ba2a.isValid())
self.assertEqual('books_authors', ba2a.referencingLayer().name())
self.assertEqual('authors', ba2a.referencedLayer().name())
self.assertEqual([1], ba2a.referencingFields())
self.assertEqual([0], ba2a.referencedFields())
self.assertEqual([], self.relMgr.discoverRelations([self.rel_a, self.rel_b], [self.vl_a, self.vl_b, self.vl_link]))
self.assertEqual(1, len(self.relMgr.discoverRelations([], [self.vl_a, self.vl_link])))
def startTransaction(self):
"""
Start a new transaction and set all layers into transaction mode.
:return: None
"""
lyrs = [self.vl_a, self.vl_b, self.vl_link]
self.transaction = QgsTransaction.create(lyrs)
self.transaction.begin()
for l in lyrs:
l.startEditing()
def rollbackTransaction(self):
"""
Rollback all changes done in this transaction.
We always rollback and never commit to have the database in a pristine
state at the end of each test.
:return: None
"""
lyrs = [self.vl_a, self.vl_b, self.vl_link]
for l in lyrs:
l.commitChanges()
self.transaction.rollback()
def createWrapper(self, layer, filter=None):
"""
Basic setup of a relation widget wrapper.
Will create a new wrapper and set its feature to the one and only book
in the table.
It will also assign some instance variables to help
* self.widget The created widget
* self.table_view The table view of the widget
:return: The created wrapper
"""
if layer == self.vl_b:
relation = self.rel_b
nmrel = self.rel_a
else:
relation = self.rel_a
nmrel = self.rel_b
self.wrapper = QgsRelationWidgetWrapper(layer, relation)
self.wrapper.setConfig({'nm-rel': nmrel.id()})
context = QgsAttributeEditorContext()
context.setVectorLayerTools(self.vltools)
self.wrapper.setContext(context)
self.widget = self.wrapper.widget()
self.widget.show()
request = QgsFeatureRequest()
if filter:
request.setFilterExpression(filter)
book = next(layer.getFeatures(request))
self.wrapper.setFeature(book)
self.table_view = self.widget.findChild(QTableView)
return self.wrapper
class VlTools(QgsVectorLayerTools):
"""
Mock the QgsVectorLayerTools
Since we don't have a user on the test server to input this data for us, we can just use this.
"""
def setValues(self, values):
"""
Set the values for the next feature to insert
:param values: An array of values that shall be used for the next inserted record
:return: None
"""
self.values = values
def addFeature(self, layer, defaultValues, defaultGeometry):
"""
Overrides the addFeature method
:param layer: vector layer
:param defaultValues: some default values that may be provided by QGIS
:param defaultGeometry: a default geometry that may be provided by QGIS
:return: tuple(ok, f) where ok is if the layer added the feature and f is the added feature
"""
values = list()
for i, v in enumerate(self.values):
if v:
values.append(v)
else:
values.append(layer.dataProvider().defaultValueClause(i))
f = QgsFeature(layer.fields())
f.setAttributes(values)  # use the list with default value clauses substituted, not the raw self.values
f.setGeometry(defaultGeometry)
ok = layer.addFeature(f)
return ok, f
def startEditing(self, layer):
pass
def stopEditing(self, layer, allowCancel):
pass
def saveEdits(self, layer):
pass
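# A minimal usage sketch of this mock (values are illustrative assumptions,
# not taken from the tests): a None entry makes addFeature() substitute the
# provider's default value clause for that field.
#
# vltools = VlTools()
# vltools.setValues([None, 'An Example Title'])
# ok, feat = vltools.addFeature(layer, defaultValues={}, defaultGeometry=QgsGeometry())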
if __name__ == '__main__':
unittest.main()
|
geopython/QGIS
|
tests/src/python/test_qgsrelationeditwidget.py
|
Python
|
gpl-2.0
| 12,569
|
[
"Galaxy"
] |
b3d6c414ee0556409ea28fac9ac3e078a834e2e2f8cb53f2c8041b645271e43e
|
__author__ = 'Mike McCann'
__copyright__ = '2012'
__license__ = 'GPL v3'
__contact__ = 'mccann at mbari.org'
__doc__ = '''
STOQS Query manager for building ajax responses to selections made for QueryUI
@undocumented: __doc__ parser
@status: production
@license: GPL
'''
from collections import defaultdict
from collections.abc import Mapping  # Mapping moved to collections.abc in Python 3.3 and was removed from collections in 3.10
from django.conf import settings
from django.db import transaction
from django.db.models import Q, Max, Min, Sum, Avg
from django.db.models.sql import query
from django.contrib.gis.db.models import Extent, Union
from django.contrib.gis.geos import fromstr, MultiPoint, Point
from django.db.utils import DatabaseError, DataError
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse
from stoqs import models
from loaders import MEASUREDINSITU, X3DPLATFORMMODEL, X3D_MODEL
from loaders.SampleLoaders import SAMPLED, NETTOW, PLANKTONPUMP, ESP_FILTERING, sample_simplify_crit, SAMPLE_TYPES
from matplotlib.colors import rgb2hex
from .utils import round_to_n, postgresifySQL, EPOCH_STRING, EPOCH_DATETIME
from .utils import (getGet_Actual_Count, getShow_Sigmat_Parameter_Values, getShow_StandardName_Parameter_Values,
getShow_All_Parameter_Values, getShow_Parameter_Platform_Data)
from .utils import simplify_points, getParameterGroups
from .geo import GPS
from .MPQuery import MPQuery
from .PQuery import PQuery
from .Viz import MeasuredParameter, ParameterParameter, PPDatabaseException, PlatformAnimation
from coards import to_udunits
from datetime import datetime
from django.contrib.gis import gdal
import logging
import matplotlib.pyplot as plt
import pprint
import calendar
import re
import locale
import time
import os
import numpy as np
logger = logging.getLogger(__name__)
# Constants to be also used by classifiers in contrib/analysis
LABEL = 'label'
DESCRIPTION = 'description'
COMMANDLINE = 'commandline'
LRAUV_MISSION = 'LRAUV Mission'
spherical_mercator_srid = 3857
# Constants for parametertime coordinates
LONGITUDE_UNITS = 'degrees_east'
LATITUDE_UNITS = 'degrees_north'
DEPTH_UNITS = 'm'
TIME_UNITS = 'seconds since 1970-01-01'
class STOQSQManager(object):
'''
This class is designed to handle building and managing queries against the STOQS database.
Chander Ganesan <chander@otg-nc.com>
'''
def __init__(self, request, response, dbname, *args, **kwargs):
'''
This object should be created by passing in an HTTPRequest Object, an HTTPResponse object
and the name of the database to be used.
'''
self.request = request
self.dbname = dbname
self.kwargs = kwargs
self.response = response
self.mpq = MPQuery(request)
self.contour_mpq = MPQuery(request)
self.pq = PQuery(request)
self.pp = None
self._actual_count = None
self.initialQuery = True
self.platformTypeHash = {}
# monkey patch sql/query.py to make it use our database for sql generation
query.DEFAULT_DB_ALIAS = dbname
# Dictionary of items that get returned via AJAX as the JSON response. Make available as member variable.
self.options_functions = {
'activitynames': self.getActivityNames,
'sampledparametersgroup': self.getParameters,
'measuredparametersgroup': self.getParameters,
'parameterminmax': self.getParameterMinMax,
'platforms': self.getPlatforms,
'time': self.getTime,
'depth': self.getDepth,
'simpledepthtime': self.getSimpleDepthTime,
##'simplebottomdepthtime': self.getSimpleBottomDepthTime,
'parametertime': self.getParameterTime,
'sampledepthtime': self.getSampleDepthTime,
'sampledurationsdepthtime': self.getSampleDurationDepthTime,
'counts': self.getCounts,
'mpsql': self.getMeasuredParametersPostgreSQL,
'spsql': self.getSampledParametersPostgreSQL,
'extent': self.getExtent,
'activityparameterhistograms': self.getActivityParameterHistograms,
'parameterplatformdatavaluepng': self.getParameterDatavaluePNG,
'parameterparameterx3d': self.getParameterParameterX3D,
'measuredparameterx3d': self.getMeasuredParameterX3D,
'curtainx3d': self.getPDV_IFSs,
'platformanimation': self.getPlatformAnimation,
'parameterparameterpng': self.getParameterParameterPNG,
'parameterplatforms': self.getParameterPlatforms,
'x3dterrains': self.getX3DTerrains,
'x3dplaybacks': self.getX3DPlaybacks,
'resources': self.getResources,
'attributes': self.getAttributes,
'updatefromzoom': self.getUpdateFromZoom,
}
def buildQuerySets(self, *args):
'''
Build the query sets based on any selections from the UI. We need one each for Activities, Samples, ActivityParameters, and ActivityParameterHistograms
'''
self.kwargs['fromTable'] = 'Activity'
self._buildQuerySet()
self.kwargs['fromTable'] = 'Sample'
self._buildQuerySet()
self.kwargs['fromTable'] = 'ActivityParameter'
self._buildQuerySet()
self.kwargs['fromTable'] = 'ActivityParameterHistogram'
self._buildQuerySet()
def _buildQuerySet(self, *args):
'''
Build the query set based on any selections from the UI. The first time through, kwargs will be empty
and self.qs will be built of a join of activities, parameters, and platforms with no constraints.
Right now supported keyword arguments are the following:
sampledparametersgroup - a list of sampled parameter ids to include
measuredparametersgroup - a list of measured parameter ids to include
parameterstandardname - a list of parameter standard_names to include
platforms - a list of platform names to include
time - a two-tuple consisting of a start and end time, if either is None, the assumption is no start (or end) time
depth - a two-tuple consisting of a depth range (start/end depth); if either is None, the assumption is no start (or end) depth
parametervalues - a dictionary of parameter names and tuples of min & max values to use as constraints
these are passed onto MPQuery and processed from the kwargs dictionary
parameterparameter - a tuple of Parameter ids for x, y, z axes and color for correlation plotting
These are all called internally - so we'll assume that all the validation has been done in advance,
and the calls to this method meet the requirements stated above.
'''
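# Illustrative kwargs for this method (values are assumptions, shaped per
# the docstring above):
# self.kwargs = {'fromTable': 'Activity',
#                'platforms': ['dorado'],
#                'time': ('2010-06-27 00:00:00', '2010-06-29 00:00:00'),
#                'depth': (0, 100),
#                'measuredparametersgroup': [7]}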
fromTable = 'Activity' # Default is Activity
if 'fromTable' in self.kwargs:
fromTable = self.kwargs['fromTable']
if 'qs' in self.kwargs:
logger.debug('Using query set passed in to make a non-activity based query')
qs = self.kwargs['qs']  # the original indexed the *args tuple with a string, which would raise TypeError
else:
# Provide "base" querysets with depth and filters so that more efficient inner joins are generated
if fromTable == 'Activity':
logger.debug('Making default activity based query')
qs = models.Activity.objects.using(self.dbname).all() # To receive filters constructed below from kwargs
qs_platform = qs
elif fromTable == 'Sample':
logger.debug('Making %s based query', fromTable)
qs = models.Sample.objects.using(self.dbname).all() # To receive filters constructed below from kwargs
# Exclude sub (child) samples where name is not set. Flot UI needs a name for its selector
qs = qs.exclude(name__isnull=True)
elif fromTable == 'ActivityParameter':
logger.debug('Making %s based query', fromTable)
qs = models.ActivityParameter.objects.using(self.dbname).all() # To receive filters constructed below from kwargs
elif fromTable == 'ActivityParameterHistogram':
logger.debug('Making %s based query', fromTable)
qs = models.ActivityParameterHistogram.objects.using(self.dbname).all() # To receive filters constructed below from kwargs
else:
logger.exception('No handler for fromTable = %s', fromTable)
self.args = args
# Determine if this is the initial query and set a flag
for k, v in list(self.kwargs.items()):
# Test keys that can affect the MeasuredParameter count
if k == 'depth' or k == 'time':
if v[0] is not None or v[1] is not None:
self.initialQuery = False
elif k in ['measuredparametersgroup', 'parameterstandardname', 'platforms']:
if v:
logger.debug('Setting self.initialQuery = False because %s = %s', k, v)
self.initialQuery = False
logger.debug('self.initialQuery = %s', self.initialQuery)
# Check to see if there is a "builder" for a Q object using the given parameters and build up the filter from the Q objects
for k, v in list(self.kwargs.items()):
if not v:
continue
if k == 'fromTable':
continue
if hasattr(self, '_%sQ' % (k,)):
# Call the method if it exists, and add the resulting Q object to the filtered queryset.
q = getattr(self,'_%sQ' % (k,))(v, fromTable)
logger.debug('fromTable = %s, k = %s, v = %s, q = %s', fromTable, k, v, q)
qs = qs.filter(q)
# Build qs_platform for Platform UI buttons to work
if k != 'platforms' and fromTable == 'Activity':
qs_platform = qs_platform.filter(q)
# Assign query sets for the current UI selections
if fromTable == 'Activity':
self.qs = qs.using(self.dbname)
self.qs_platform = qs_platform
##logger.debug('Activity query = %s', str(self.qs.query))
elif fromTable == 'Sample':
self.sample_qs = qs.using(self.dbname)
##logger.debug('Sample query = %s', str(self.sample_qs.query))
elif fromTable == 'ActivityParameter':
self.activityparameter_qs = qs.using(self.dbname)
##logger.debug('activityparameter_qs = %s', str(self.activityparameter_qs.query))
elif fromTable == 'ActivityParameterHistogram':
self.activityparameterhistogram_qs = qs.using(self.dbname)
##logger.debug('activityparameterhistogram_qs = %s', str(self.activityparameterhistogram_qs.query))
def generateOptions(self):
'''
Generate a dictionary of all the selectable parameters by executing each of the functions
to generate those parameters. In this case, we'll simply do it by defining the dictionary and its associated
function, then iterate over that dictionary calling the function(s) to get the value to be returned.
Note that in the case of parameters the return is a list of 2-tuples of (name, standard_name) and for
platforms the result is a list of 3-tuples of (name, id, color) for the associated elements.
For time and depth, the result is a single 2-tuple with the min and max value (respectively).
These objects are "simple" dictionaries using only Python's built-in types - so conversion to a
corresponding JSON object should be trivial.
'''
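# A hedged sketch of the structure this returns (names and values are
# assumptions for illustration, per the docstring above):
# {'platforms': {'AUV': [('dorado', 'dorado', 'ffeda0', 'trajectory')]},
#  'time': (1277596800000, 1277769600000),
#  'depth': ('0.00', '100.00')}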
results = {}
for k, v in list(self.options_functions.items()):
if self.kwargs['only'] != []:
if k not in self.kwargs['only']:
continue
if k in self.kwargs['except']:
continue
start_time = time.time()
if k == 'measuredparametersgroup':
results[k] = v(MEASUREDINSITU)
elif k == 'sampledparametersgroup':
results[k] = v(SAMPLED)
else:
results[k] = v()
logger.info(f"Built in {1000*(time.time()-start_time):6.1f} ms {k} with {str(v).split('.')[1].split(' ')[0]}()")
return results
#
# Methods that generate summary data, based on the current query criteria
#
def getUpdateFromZoom(self):
if self.request.GET.get('updatefromzoom', '0') == '1':
return 1
else:
return 0
def getActivityNames(self):
'''Return list of activities that have been selected in UI's Metadata -> NetCDF section
'''
activity_names = None
if 'activitynames' in self.kwargs:
activity_names = self.kwargs.get('activitynames')
return activity_names
def getCounts(self):
'''
Collect all of the various counts into a dictionary
'''
# Always get approximate count
logger.debug('str(self.getActivityParametersQS(forCount=True).query) = %s', str(self.getActivityParametersQS(forCount=True).query))
approximate_count = self.getActivityParametersQS(forCount=True).aggregate(Sum('number'))['number__sum']
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
# Actual counts are None unless the 'Get actual count' box is checked
actual_count = None
actual_count_localized = None
if getGet_Actual_Count(self.kwargs):
if not self.mpq.qs_mp:
self.mpq.buildMPQuerySet(*self.args, **self.kwargs)
if self._actual_count:
actual_count = self._actual_count
else:
logger.debug('Calling self.mpq.getMPCount()')
actual_count = self.mpq.getMPCount()
logger.debug('actual_count = %s', actual_count)
try:
approximate_count_localized = locale.format("%d", approximate_count, grouping=True)
except TypeError:
logger.warn('Failed to format approximate_count = %s into a number, setting to None', approximate_count)
approximate_count_localized = None
if actual_count:
try:
actual_count_localized = locale.format("%d", actual_count, grouping=True)
except TypeError:
logger.exception('Failed to format actual_count = %s into a number', actual_count)
return { 'ap_count': self.getAPCount(),
'approximate_count': approximate_count,
'approximate_count_localized': approximate_count_localized,
'actual_count': actual_count,
'actual_count_localized': actual_count_localized
}
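# Worked example of the localization above, assuming an en_US locale:
# locale.format("%d", 2150734, grouping=True) -> '2,150,734'
# (locale.format() was removed in Python 3.12; locale.format_string() is
# the drop-in replacement.)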
def getMeasuredParametersPostgreSQL(self):
'''
Wrapper around self.mpq.getMeasuredParametersPostgreSQL(), ensuring that we have qs_mp built before calling
'''
sql = ''
if not self.mpq.qs_mp:
self.mpq.buildMPQuerySet(*self.args, **self.kwargs)
self.mpq.initialQuery = self.initialQuery
try:
sql = self.mpq.getMeasuredParametersPostgreSQL()
self._actual_count = self.mpq.getMPCount()
except Exception as e:
logger.warn('Could not get MeasuredParametersPostgreSQL: %s', e)
return sql
def getSampledParametersPostgreSQL(self):
'''
Wrapper around self.mpq.getSampledParametersPostgreSQL(), ensuring that we have qs_mp built before calling
'''
if not self.mpq.qs_mp:
self.mpq.buildMPQuerySet(*self.args, **self.kwargs)
self.mpq.initialQuery = self.initialQuery
sql = self.mpq.getSampledParametersPostgreSQL()
return sql
def getAPCount(self):
'''
Return count of ActivityParameters given the current constraints
'''
qs_ap = self.getActivityParametersQS() # Approximate count from ActivityParameter
if qs_ap:
return qs_ap.count()
else:
return 0
def getActivityParametersQS(self, forCount=False):
'''
Return query set of ActivityParameters given the current constraints.
If forCount is True then add list of measured parameters to the query; this is done here for the query
needed for getting the count. The ParameterParameter min & max query also uses self.activityparameter_qs
and we don't want the addition of the measured parameters query for that.
'''
if not self.activityparameter_qs:
logger.warn("self.activityparameter_qs is None")
if forCount:
if self.kwargs['measuredparametersgroup']:
logger.debug('Adding Q object for parameter__id__in = %s', self.kwargs['measuredparametersgroup'])
return self.activityparameter_qs.filter(Q(parameter__id__in=self.kwargs['measuredparametersgroup']))
else:
return self.activityparameter_qs
else:
return self.activityparameter_qs
def getActivityParameterHistogramsQS(self):
'''
Return query set of ActivityParameterHistograms given the current constraints.
'''
return self.activityparameterhistogram_qs
def getSampleQS(self):
'''
Return query set of Samples given the current constraints.
'''
return self.sample_qs
def getParameters(self, groupName=''):
'''
Get a list of the unique parameters that are left based on the current query criteria.
We assume here that the name is unique and is also used for the id
'''
# Django makes it easy to do sub-queries: Get Parameters from list of Activities matching current selection
p_qs = models.Parameter.objects.using(self.dbname).filter(Q(activityparameter__activity__in=self.qs))
if 'mplabels' in self.kwargs:
if self.kwargs['mplabels']:
# Get all Parameters that have common Measurements given the filter of the selected labels
# - this allows selection of co-located MeasuredParameters
commonMeasurements = models.MeasuredParameterResource.objects.using(self.dbname).filter(
resource__id__in=self.kwargs['mplabels']).values_list(
'measuredparameter__measurement__id', flat=True)
p_qs = p_qs.filter(Q(id__in=models.MeasuredParameter.objects.using(self.dbname).filter(
Q(measurement__id__in=commonMeasurements)).values_list('parameter__id', flat=True)))
if groupName:
p_qs = p_qs.filter(parametergroupparameter__parametergroup__name=groupName)
if self.kwargs.get('activitynames'):
p_qs = p_qs.filter(activityparameter__activity__name__in=self.kwargs.get('activitynames'))
p_qs = p_qs.values('name', 'standard_name', 'id', 'units', 'long_name', 'description').distinct()
results=[]
for row in p_qs.order_by('name'):
name = row['name']
standard_name = row['standard_name']
id = row['id']
units = row['units']
# Get additional Parameter information from NetCDF variable attributes
long_name = row['long_name']
if not long_name:
long_name_q = models.ParameterResource.objects.using(self.dbname).filter(
parameter__id=id, resource__name='long_name').values(
'resource__value')
if long_name_q:
long_name = long_name_q[0].get('resource__value', '')
else:
long_name = ''
comment = ''
comment_q = models.ParameterResource.objects.using(self.dbname).filter(
parameter__id=id, resource__name='comment').values(
'resource__value', 'parameter__name')
if not comment_q:
pass
else:
comment = f"{comment_q[0].get('resource__value', '')}"
if comment_q.count() > 1:
comment += f" ({(comment_q.count() -1)} comments for additional platforms not shown)"
comment += ". "
description = row.get('description', '')
if not description:
description = ''
if not standard_name:
standard_name = ''
if name is not None:
results.append((name, standard_name, id, units, long_name, comment, description))
return results
def getParameterMinMax(self, pid=None, percentileAggregateType='avg'):
'''
If a single parameter has been selected in the filter for data access, return the average 2.5 and 97.5
percentiles of the data and call them min and max for purposes of data access, namely KML generation in
the UI - assign these values to the 'dataaccess' key of the return hash. If pid is specificed then
assign values to the 'plot' key of the return hash. If @percentileAggregateType is 'avg' (the default)
then the average of all the 2.5 and 97.5 percentiles will be used. This would be appropriate for
contour or scatter plotting. If @percentileAggregateType is 'extrema' then the aggregate Min is used
for 'p010' and Max for 'p990'. This is appropriate for parameter-parameter plotting.
'''
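# Illustrative return value (assumed numbers) for pid=7 with the default
# 'avg' aggregate type:
# {'plot': [7, 33.25, 35.11], 'dataaccess': [7, 33.25, 35.11], 'cmincmax': []}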
da_results = []
plot_results = []
# pid takes precedence over parameterplot being specified in kwargs
if pid:
try:
if percentileAggregateType == 'extrema':
logger.debug('self.getActivityParametersQS().filter(parameter__id=%s) = %s', pid, str(self.getActivityParametersQS().filter(parameter__id=pid).query))
qs = self.getActivityParametersQS().filter(parameter__id=pid).aggregate(Min('p010'), Max('p990'), Avg('median'))
logger.debug('qs = %s', qs)
try:
plot_results = [pid, round_to_n(qs['p010__min'],4), round_to_n(qs['p990__max'],4)]
except TypeError:
logger.warn('Failed to get plot_results for qs = %s', qs)
else:
qs = self.getActivityParametersQS().filter(parameter__id=pid).aggregate(Avg('p025'), Avg('p975'), Avg('median'))
try:
plot_results = [pid, round_to_n(qs['p025__avg'],4), round_to_n(qs['p975__avg'],4)]
if plot_results[1] == plot_results[2]:
logger.debug('Standard min and max for pid %s are the same. Getting the overall min and max values.', pid)
qs = self.getActivityParametersQS().filter(parameter__id=pid).aggregate(Min('p025'), Max('p975'))
plot_results = [pid, round_to_n(qs['p025__min'],4), round_to_n(qs['p975__max'],4)]
except TypeError:
logger.debug('Failed to get plot_results for qs = %s', qs)
except ValueError as e:
if pid in ('longitude', 'latitude'):
# Get limits from Activity maptrack for which we have our getExtent() method
extent, lon_mid, lat_mid, _ = self.getExtent(outputSRID=4326)
if pid == 'longitude':
plot_results = ['longitude', round_to_n(extent[0][0], 4), round_to_n(extent[1][0],4)]
if pid == 'latitude':
plot_results = ['latitude', round_to_n(extent[0][1], 4), round_to_n(extent[1][1],4)]
elif pid == 'depth':
dminmax = self.qs.aggregate(Min('mindepth'), Max('maxdepth'))
plot_results = ['depth', round_to_n(dminmax['mindepth__min'], 4), round_to_n(dminmax['maxdepth__max'],4)]
elif pid == 'time':
epoch = EPOCH_DATETIME
tminmax = self.qs.aggregate(Min('startdate'), Max('enddate'))
tmin = (tminmax['startdate__min'] - epoch).days + (tminmax['startdate__min'] - epoch).seconds / 86400.
tmax = (tminmax['enddate__max'] - epoch).days + (tminmax['enddate__max'] - epoch).seconds / 86400.
plot_results = ['time', tmin, tmax]
else:
logger.error('%s, but pid text = %s is not a coordinate', e, pid)
return {'plot': plot_results, 'dataaccess': []}
except DataError as e:
# Likely "value out of range: overflow", clamp to limits of single-precision floats
logger.warn(f'{e}')
logger.warn(f'Setting pid = {pid} in plot_results to min/max to limits of single-precision floats')
plot_results = [pid, round_to_n(np.finfo('f').min, 4), round_to_n(np.finfo('f').max, 4)]
elif 'parameterplot' in self.kwargs:
if self.kwargs['parameterplot'][0]:
parameterID = self.kwargs['parameterplot'][0]
try:
if percentileAggregateType == 'extrema':
qs = self.getActivityParametersQS().filter(parameter__id=parameterID).aggregate(Min('p025'), Max('p975'))
plot_results = [parameterID, round_to_n(qs['p025__min'],4), round_to_n(qs['p975__max'],4)]
else:
qs = self.getActivityParametersQS().filter(parameter__id=parameterID).aggregate(Avg('p025'), Avg('p975'))
plot_results = [parameterID, round_to_n(qs['p025__avg'],4), round_to_n(qs['p975__avg'],4)]
except TypeError as e:
# Likely 'Cannot plot Parameter' that is not in selection, ignore for cleaner functional tests
logger.debug(f'parameterID = {parameterID}: {str(e)}')
except DataError as e:
logger.warn(f'{e}')
logger.warn(f'Setting pid = {pid} in plot_results to min/max to limits of single-precision floats')
plot_results = [pid, round_to_n(np.finfo('f').min, 4), round_to_n(np.finfo('f').max, 4)]
if 'measuredparametersgroup' in self.kwargs:
if len(self.kwargs['measuredparametersgroup']) == 1:
mpid = self.kwargs['measuredparametersgroup'][0]
try:
pid = models.Parameter.objects.using(self.dbname).get(id=mpid).id
logger.debug('pid = %s', pid)
if percentileAggregateType == 'extrema':
qs = self.getActivityParametersQS().filter(parameter__id=pid).aggregate(Min('p010'), Max('p990'))
da_results = [pid, round_to_n(qs['p010__min'],4), round_to_n(qs['p990__max'],4)]
else:
qs = self.getActivityParametersQS().filter(parameter__id=pid).aggregate(Avg('p025'), Avg('p975'))
da_results = [pid, round_to_n(qs['p025__avg'],4), round_to_n(qs['p975__avg'],4)]
except TypeError as e:
logger.exception(e)
except DataError as e:
logger.warn(f'{e}')
logger.warn(f'Setting pid = {pid} in da_results to min/max to limits of single-precision floats')
da_results = [pid, round_to_n(np.finfo('f').min, 4), round_to_n(np.finfo('f').max, 4)]
if 'sampledparametersgroup' in self.kwargs:
if len(self.kwargs['sampledparametersgroup']) == 1:
spid = self.kwargs['sampledparametersgroup'][0]
try:
pid = models.Parameter.objects.using(self.dbname).get(id=spid).id
logger.debug('pid = %s', pid)
if percentileAggregateType == 'extrema':
qs = self.getActivityParametersQS().filter(parameter__id=pid).aggregate(Min('p010'), Max('p990'))
da_results = [pid, round_to_n(qs['p010__min'],4), round_to_n(qs['p990__max'],4)]
else:
qs = self.getActivityParametersQS().filter(parameter__id=pid).aggregate(Avg('p025'), Avg('p975'))
da_results = [pid, round_to_n(qs['p025__avg'],4), round_to_n(qs['p975__avg'],4)]
except TypeError as e:
logger.exception(e)
if 'parameterstandardname' in self.kwargs:
if len(self.kwargs['parameterstandardname']) == 1:
sname = self.kwargs['parameterstandardname'][0]
try:
if percentileAggregateType == 'extrema':
qs = self.getActivityParametersQS().filter(parameter__standard_name=sname).aggregate(Min('p025'), Max('p975'))
da_results = [sname, round_to_n(qs['p025__min'],4), round_to_n(qs['p975__max'],4)]
else:
qs = self.getActivityParametersQS().filter(parameter__standard_name=sname).aggregate(Avg('p025'), Avg('p975'))
da_results = [sname, round_to_n(qs['p025__avg'],4), round_to_n(qs['p975__avg'],4)]
except TypeError as e:
logger.exception(e)
# Sometimes da_results is empty, make it the same as plot_results if this happens
# TODO: simplify the logic implemented above...
if not da_results:
da_results = plot_results
cmincmax = []
if self.request.GET.get('cmin') and self.request.GET.get('cmax'):
if plot_results:
cmincmax = [plot_results[0],
float(self.request.GET.get('cmin')),
float(self.request.GET.get('cmax'))]
if self.request.GET.get('cmincmax_lock') == '1':
plot_results = cmincmax
else:
# Likely a selection from the UI that doesn't include the plot parameter
logger.debug('plot_results is empty')
return {'plot': plot_results, 'dataaccess': da_results, 'cmincmax': cmincmax}
def _getPlatformModel(self, platformName):
'''Return Platform X3D model information. Designed for stationary
platforms from non-trajectory Activities.
'''
@transaction.atomic(using=self.dbname)
def _innerGetPlatformModel(self, platformName):
modelInfo = None, None, None, None
pModel = models.PlatformResource.objects.using(self.dbname).filter(
resource__resourcetype__name=X3DPLATFORMMODEL,
resource__name=X3D_MODEL,
platform__name=platformName).values_list(
'resource__uristring', flat=True).distinct()
if pModel:
# Timeseries and timeseriesProfile data for a single platform
# (even if composed of multiple Activities) must have single
# unique horizontal position.
geom_list = [_f for _f in self.qs.filter(platform__name=platformName)
.values_list('nominallocation__geom', flat=True)
.distinct() if _f]
try:
geom = geom_list[0]
except IndexError:
return modelInfo
if not geom:
return modelInfo
if len(geom_list) > 1:
logger.debug('More than one location for %s returned. '
'Using first one found: %s', platformName, geom)
# TimeseriesProfile data has multiple nominaldepths - look to
# Resource for nominaldepth of the Platform for these kind of data.
depth_list = self.qs.filter(platform__name=platformName).values_list(
'nominallocation__depth', flat=True).distinct()
if len(depth_list) > 1:
logger.debug('More than one depth for %s returned. Checking '
'Resource for nominaldepth', platformName)
try:
depth = float(models.PlatformResource.objects.using(self.dbname).filter(
resource__resourcetype__name=X3DPLATFORMMODEL,
platform__name=platformName,
resource__name='X3D_MODEL_nominaldepth'
).values_list('resource__value', flat=True)[0])
logger.debug('Got depth = %s from X3D_MODEL_nominaldepth in '
'PlatformResource', depth)
except (IndexError, ObjectDoesNotExist):
logger.warn('Resource name X3D_MODEL_nominaldepth not found '
'for platform %s. Using a nominaldepth of 0.0', platformName)
depth = 0.0
else:
depth = depth_list[0]
modelInfo = (pModel[0], geom.y, geom.x,
-depth * float(self.request.GET.get('ve', 1)))
return modelInfo
return _innerGetPlatformModel(self, platformName)
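# Illustrative (assumed) return value for a stationary mooring model, i.e.
# (model URI, latitude, longitude, -nominaldepth * vertical exaggeration):
# ('https://example.org/models/mooring.x3d', 36.7, -122.05, -10.0)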
def getPlatforms(self):
'''
Get a list of the unique platforms that are left based on the current query criteria.
We assume here that the name is unique and is also used for the id - this is enforced on
data load. Organize the platforms into a dictionary keyed by platformType.
'''
if self.platformTypeHash:
return self.platformTypeHash
# Use queryset that does not filter out platforms - so that Platform buttons work in the UI
qs = (self.qs_platform.filter(~Q(activitytype__name=LRAUV_MISSION))
.values('platform__uuid', 'platform__name', 'platform__color',
'platform__platformtype__name').distinct().order_by('platform__name'))
if self.kwargs.get('activitynames'):
qs = qs.filter(name__in=self.kwargs.get('activitynames'))
platformTypeHash = defaultdict(list)
logger.debug(f"Begining to build platformTypeHash...")
for row in qs:
logger.debug(f"Checking row = {row}")
name=row['platform__name']
id=row['platform__name']
color=row['platform__color']
platformType = row['platform__platformtype__name']
if name is not None and id is not None:
# Get the featureType(s) from the Resource
fts = models.ActivityResource.objects.using(self.dbname).filter(resource__name='featureType',
activity__platform__name=name).values_list('resource__value', flat=True).distinct()
# Make all lower case
fts = [ft.lower() for ft in fts]
if len(fts) > 1:
logger.warn('More than one featureType returned for platform %s: %s.', name, fts)
logger.warn(f"Using '{fts[0]}'. Consider using a different Platform name for the other featureType(s).")
try:
featureType = fts[0]
except IndexError:
logger.warn('No featureType returned for platform name = %s. Setting it to "trajectory".', name)
featureType = 'trajectory'
if 'trajectory' in featureType:
platformTypeHash[platformType].append((name, id, color, featureType, ))
else:
# Filter out models from static platforms not in the selection
if name in self.qs.values_list('platform__name', flat=True):
logger.debug(f"Seeing if Platform {name} has an x3dModel...")
x3dModel, x, y, z = self._getPlatformModel(name)
if not x3dModel:
logger.debug("No x3dModel. Not adding x3dModel")
platformTypeHash[platformType].append((name, id, color, featureType, ))
continue
# Only add stationary X3D model for platforms that don't have roll, pitch and yaw
# Platforms with rotations have their X3D model added to the scene in stoqs/utils/Viz/animation.py
logger.debug(f"Seeing if Platform {name} has roll, pitch, and yaw Parameters...")
pr_qs = models.ActivityParameter.objects.using(self.dbname).filter(activity__platform__name=name)
has_roll = pr_qs.filter(parameter__standard_name='platform_roll_angle')
has_pitch = pr_qs.filter(parameter__standard_name='platform_pitch_angle')
has_yaw = pr_qs.filter(parameter__standard_name='platform_yaw_angle')
if has_roll or has_pitch or has_yaw:
logger.debug("Has roll, pitch, or yaw. Not adding x3dModel")
platformTypeHash[platformType].append((name, id, color, featureType, ))
else:
logger.debug("Has x3dModel, no rotations, adding x3dModel")
platformTypeHash[platformType].append((name, id, color, featureType, x3dModel, x, y, z))
logger.debug(f"Done building platformTypeHash.")
self.platformTypeHash = platformTypeHash
return platformTypeHash
def getTime(self):
'''
Based on the currently selected query criteria, determine the available time range. That'll be
returned as a 2-tuple as the min and max values that are selectable.
'''
# Documentation of some query optimization (tested with dorado & tethys data from June 2010 loaded with a stride of 100)
# =====================================================================================================================
# The statements:
# qs=self.qs.aggregate(Max('instantpoint__timevalue'), Min('instantpoint__timevalue'))
# return (qs['instantpoint__timevalue__min'], qs['instantpoint__timevalue__max'],)
# produce this SQL which takes 75.2 ms to execute:
# stoqs_june2011=# explain analyze SELECT DISTINCT MAX("stoqs_instantpoint"."timevalue") AS "instantpoint__timevalue__max", MIN("stoqs_instantpoint"."timevalue") AS "instantpoint__timevalue__min" FROM "stoqs_activity" LEFT OUTER JOIN "stoqs_instantpoint" ON ("stoqs_activity"."id" = "stoqs_instantpoint"."activity_id");
# QUERY PLAN
# ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# HashAggregate (cost=738.13..738.14 rows=1 width=8) (actual time=75.154..75.154 rows=1 loops=1)
# -> Aggregate (cost=738.11..738.12 rows=1 width=8) (actual time=75.144..75.145 rows=1 loops=1)
# -> Merge Left Join (cost=0.00..629.34 rows=21755 width=8) (actual time=0.032..51.337 rows=21726 loops=1)
# Merge Cond: (stoqs_activity.id = stoqs_instantpoint.activity_id)
# -> Index Scan using stoqs_activity_pkey on stoqs_activity (cost=0.00..17.58 rows=45 width=4) (actual time=0.008..0.058 rows=36 loops=1)
# -> Index Scan using stoqs_instantpoint_activity_id on stoqs_instantpoint (cost=0.00..707.58 rows=21755 width=12) (actual time=0.016..19.982 rows=21726 loops=1)
# Total runtime: 75.231 ms
# (7 rows)
#
# The statements:
# qs=self.qs.aggregate(Max('enddate'), Min('startdate'))
# return (qs['startdate__min'], qs['enddate__max'],)
# take 0.22 ms
# stoqs_june2011=# explain analyze SELECT DISTINCT MIN("stoqs_activity"."startdate") AS "startdate__min", MAX("stoqs_activity"."enddate") AS "enddate__max" FROM "stoqs_activity";
# QUERY PLAN
# -----------------------------------------------------------------------------------------------------------------------
# HashAggregate (cost=5.69..5.70 rows=1 width=16) (actual time=0.154..0.156 rows=1 loops=1)
# -> Aggregate (cost=5.67..5.69 rows=1 width=16) (actual time=0.143..0.144 rows=1 loops=1)
# -> Seq Scan on stoqs_activity (cost=0.00..5.45 rows=45 width=16) (actual time=0.009..0.064 rows=36 loops=1)
# Total runtime: 0.219 ms
# (4 rows)
#
# While only a fraction of a second different, it is 342 times faster!
qs=self.qs.aggregate(Max('enddate'), Min('startdate'))
try:
times = (time.mktime(qs['startdate__min'].timetuple())*1000, time.mktime(qs['enddate__max'].timetuple())*1000,)
except AttributeError:
logger.exception('Failed to get timetuple from qs = %s', qs)
return
else:
return times
def getDepth(self):
'''
Based on the currently selected query criteria, determine the available depth range. That'll be
returned as a 2-tuple as the min and max values that are selectable.
'''
# Original query that dives into the measurement table via instantpoint
##qs=self.qs.aggregate(Max('instantpoint__measurement__depth'), Min('instantpoint__measurement__depth'))
##return (qs['instantpoint__measurement__depth__min'],qs['instantpoint__measurement__depth__max'])
# Alternate query to use stats stored with the Activity
qs=self.qs.aggregate(Max('maxdepth'), Min('mindepth'))
try:
depths = ('%.2f' % qs['mindepth__min'], '%.2f' % qs['maxdepth__max'])
except TypeError:
logger.exception('Failed to convert mindepth__min and/or maxdepth__max to float from qs = %s', qs)
return
else:
return depths
def _add_ts_tsp_to_sdt(self, p, plq, timeSeriesQ, timeSeriesProfileQ, sdt):
'''Add to the sdt hash a timeseries or timeseriesProfile structure
'''
iptvq = Q()
qs_tsp = None
logger.debug(f"Building sdt for Platform {p}")
qs_tsp = (self.qs.filter(plq & (timeSeriesQ | timeSeriesProfileQ))
.select_related()
.values('simpledepthtime__epochmilliseconds',
'simpledepthtime__depth', 'name',
'simpledepthtime__nominallocation__depth')
.order_by('simpledepthtime__epochmilliseconds')
.distinct())
if 'time' in self.kwargs:
if self.kwargs['time'][0] is not None and self.kwargs['time'][1] is not None:
logger.debug(f"Querying beween {self.kwargs['time']}")
qs_tsp = qs_tsp.filter(Q(instantpoint__timevalue__gte = self.kwargs['time'][0]) &
Q(instantpoint__timevalue__lte = self.kwargs['time'][1]))
# Add to sdt hash date-time series organized by
# activity__name_nominallocation__depth key within a platform__name key
logger.debug(' filling sdt[]')
for sd in qs_tsp:
an_nd = '%s_%s' % (sd['name'], sd['simpledepthtime__nominallocation__depth'])
if 'simpledepthtime__epochmilliseconds' in sd:
sdt[p[0]][an_nd].append(
[sd['simpledepthtime__epochmilliseconds'],
'%.2f' % sd['simpledepthtime__nominallocation__depth']] )
logger.debug(' Done filling sdt[].')
def getSimpleDepthTime(self):
'''
Based on the currently selected query criteria for activities, return the associated SimpleDepth time series
values as a 2-tuple list inside a 2 level hash of platform__name (with its color) and activity__name.
Multiple simpledepthtimes may be created. There is always a 'default', which is the original concept with
the sdt items organized by Activities associated with the data sources -- usually NetCDF files.
SimpleDepthTimes may also be organized by some other criteria, for example LRAUV_MISSION type of
Activities. If these exist in the database, then additional top-level hashes with the ActivityType
as the key will be created.
'''
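# Hedged sketch of the returned structure (platform, activity and values
# are assumptions for illustration):
# {'default': {'sdt': {'dorado': {'survey_20100627': [[1277596800000, '12.34'], ...]}},
#              'colors': {'dorado': 'ffeda0'}}}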
trajectoryQ = self._trajectoryQ()
timeSeriesQ = self._timeSeriesQ()
timeSeriesProfileQ = self._timeSeriesProfileQ()
trajectoryProfileQ = self._trajectoryProfileQ()
# Define colors for other_activitytypes - at some point these will also need to go into the Spatial panel
at_colors = defaultdict(dict)
other_activitytypes = (LRAUV_MISSION, )
gist_ncar = plt.cm.gist_ncar
for at in other_activitytypes:
acts = models.Activity.objects.using(self.request.META['dbAlias']).filter(activitytype__name=at)
for act, c in zip(acts, gist_ncar(np.linspace(0, gist_ncar.N, len(acts), dtype=int))):
at_colors[at][act.name] = rgb2hex(c)[1:]
# Always have a 'default' ActivityType, and can loop over any number of other ActivityTypes
# - As of May 2019 only 'trajectory's have other_activitytypes, skip for timeseries, etc.
sdt_groups = defaultdict(dict)
for act_type in ('default', ) + other_activitytypes:
sdt_groups[act_type]['sdt'] = defaultdict(dict)
sdt_groups[act_type]['colors'] = defaultdict(dict)
for plats in list(self.getPlatforms().values()):
for p in plats:
logger.debug('Platform name: ' + p[0])
plq = Q(platform__name = p[0])
if self.kwargs.get('activitynames'):
plq = plq & Q(name__in=self.kwargs.get('activitynames'))
sdt_groups[act_type]['sdt'][p[0]] = defaultdict(list)
if act_type == 'default':
sdt_groups[act_type]['colors'][p[0]] = p[2]
else:
sdt_groups[act_type]['colors'][p[0]] = {}
if p[3].lower() == 'trajectory':
# Overkill to also filter on trajectoryQ when p[3].lower() == 'trajectory'
# - old Tethys data does not have NC_GLOBAL featureType
qs_traj = (self.qs.filter(plq)
.values_list('simpledepthtime__epochmilliseconds',
'simpledepthtime__depth', 'name')
.order_by('simpledepthtime__epochmilliseconds'))
if act_type == 'default':
# The default does not include the other ActivityTypes
qs_traj = qs_traj.filter(~Q(activitytype__name__in=other_activitytypes))
else:
qs_traj = qs_traj.filter(activitytype__name=act_type)
# Add to sdt hash date-time series organized by activity__name key
# within a platform__name key. This will let flot plot the series with
# gaps between the surveys -- not connected
logger.debug(f"-trajectory, filling sdt_groups['{act_type}']['sdt']['{p[0]}'][]")
for s in qs_traj:
if s[1] is not None:
sdt_groups[act_type]['sdt'][p[0]][s[2]].append( [s[0], '%.2f' % s[1]] )
if act_type != 'default':
for number, act_mission in enumerate(sdt_groups[act_type]['sdt'][p[0]].keys()):
sdt_groups[act_type]['colors'][p[0]][act_mission] = at_colors[act_type][act_mission]
logger.debug(f" Done filling sdt_groups['{act_type}']['sdt']['{p[0]}'][]")
elif (p[3].lower() == 'timeseries' or p[3].lower() == 'timeseriesprofile') and act_type == 'default':
self._add_ts_tsp_to_sdt(p, plq, timeSeriesQ, timeSeriesProfileQ, sdt_groups[act_type]['sdt'])
elif p[3].lower() == 'trajectoryprofile' and act_type == 'default': # pragma: no cover
iptvq = Q()
qs_tp = None
if 'time' in self.kwargs:
if self.kwargs['time'][0] is not None and self.kwargs['time'][1] is not None:
s_ems = time.mktime(datetime
.strptime(self.kwargs['time'][0], '%Y-%m-%d %H:%M:%S')
.timetuple())*1000
e_ems = time.mktime(datetime
.strptime(self.kwargs['time'][1], '%Y-%m-%d %H:%M:%S')
.timetuple())*1000
iptvq = (Q(simpledepthtime__epochmilliseconds__gte = s_ems) &
Q(simpledepthtime__epochmilliseconds__lte = e_ems))
qs_tp = (self.qs.filter(plq & trajectoryProfileQ & iptvq)
.select_related()
.values('name', 'simpledepthtime__depth',
'simpledepthtime__nominallocation__depth',
'simpledepthtime__epochmilliseconds')
.order_by('simpledepthtime__nominallocation__depth',
'simpledepthtime__epochmilliseconds')
.distinct())
if not qs_tp:
qs_tp = (self.qs.filter(plq & trajectoryProfileQ).select_related()
.values('name', 'simpledepthtime__depth',
'simpledepthtime__nominallocation__depth',
'simpledepthtime__epochmilliseconds')
.order_by('simpledepthtime__nominallocation__depth',
'simpledepthtime__epochmilliseconds')
.distinct())
# Add to sdt hash date-time series organized by activity__name_nominallocation__depth
# key within a platform__name key - use real depths
for sd in qs_tp:
##logger.debug('sd = %s', sd)
an_nd = '%s_%s' % (sd['name'], sd['simpledepthtime__nominallocation__depth'])
##logger.debug('an_nd = %s', an_nd)
if 'simpledepthtime__epochmilliseconds' in sd:
sdt_groups[act_type]['sdt'][p[0]][an_nd].append(
[sd['simpledepthtime__epochmilliseconds'],
'%.2f' % sd['simpledepthtime__depth']])
# Cleanup - remove platforms that have no simpledepthtime data values
if not sdt_groups[act_type]['sdt'][p[0]]:
del sdt_groups[act_type]['sdt'][p[0]]
del sdt_groups[act_type]['colors'][p[0]]
# Remove ActivityTypes that have no simpledepthtime data
if not sdt_groups[act_type]['sdt']:
del sdt_groups[act_type]
return sdt_groups
def getSimpleBottomDepthTime(self):
'''
Based on the currently selected query criteria for activities, return the associated SimpleBottomDepth time series
values as a 2-tuple list inside a 2 level hash of platform__name and activity__name. Append a third value to the
x,y time series of a maximum depth (positive number in meters) so that Flot will fill downward. See:
http://stackoverflow.com/questions/23790277/flot-fill-color-above-a-line-graph
'''
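# Illustrative (assumed) shape, with maxDepth appended as the third element
# so Flot fills downward from the bottom-depth line:
# {'sbdt': {'dorado': {'survey_20100627': [[1277596800000, '85.00', 10971], ...]}}}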
sbdt = {}
maxDepth = 10971 # Max ocean depth
trajectoryQ = self._trajectoryQ()
for plats in list(self.getPlatforms().values()):
for p in plats:
plq = Q(platform__name = p[0])
sbdt[p[0]] = {}
if p[3].lower() == 'trajectory':
qs_traj = self.qs.filter(plq & trajectoryQ).values_list( 'simplebottomdepthtime__epochmilliseconds', 'simplebottomdepthtime__bottomdepth',
'name').order_by('simplebottomdepthtime__epochmilliseconds')
# Add to sbdt hash date-time series organized by activity__name key within a platform__name key
# This will let flot plot the series with gaps between the surveys -- not connected
for s in qs_traj:
try:
sbdt[p[0]][s[2]].append( [s[0], '%.2f' % s[1], maxDepth] )
except KeyError:
sbdt[p[0]][s[2]] = [] # First time seeing activity__name, make it a list
if s[1] is not None:
sbdt[p[0]][s[2]].append( [s[0], '%.2f' % s[1], maxDepth] ) # Append first value, even if it is 0.0
except TypeError:
continue # Likely "float argument required, not NoneType"
return({'sbdt': sbdt})
#
# The following set of private (_...) methods are for building the parametertime response
#
def _collectParameters(self, platform, pt, pa_units, is_standard_name, ndCounts, strides, colors):
'''
Get parameters for this platform and collect units in a parameter name hash, use standard_name if set and repair bad names.
Return a tuple of pa_units, is_standard_name, ndCounts, and pt dictionaries.
'''
# Get parameters for this platform and collect units in a parameter name hash, use standard_name if set and repair bad names
p_qs = models.Parameter.objects.using(self.dbname).filter(Q(activityparameter__activity__in=self.qs))
logger.debug("self.kwargs['parametertimeplotid'] = %s", self.kwargs['parametertimeplotid'])
if self.kwargs['parametertimeplotid']:
p_qs = p_qs.filter(Q(id__in=self.kwargs['parametertimeplotid']))
p_qs = p_qs.filter(activityparameter__activity__platform__name=platform[0]).distinct()
else:
p_qs = []
for parameter in p_qs:
unit = parameter.units
# Get the number of nominal depths for this parameter
nds = models.NominalLocation.objects.using(self.dbname
).filter( Q(activity__in=self.qs),
activity__platform__name=platform[0],
measurement__measuredparameter__parameter=parameter
).values('depth').distinct().count()
# Check if timeSeries plotting is requested for trajectory data
plotTimeSeriesDepths = models.ParameterResource.objects.using(self.dbname).filter(
parameter__name=parameter, resource__name='plotTimeSeriesDepth'
).values_list('resource__value')
if nds == 0 and not plotTimeSeriesDepths:  # a QuerySet is never == [], so test emptiness instead
continue
if parameter.standard_name == 'sea_water_salinity':
unit = 'PSU'
if parameter.standard_name and parameter.standard_name.strip() != '':
logger.debug('Parameter name "%s" has standard_name = %s', parameter.name, parameter.standard_name)
pa_units[parameter.standard_name] = unit
is_standard_name[parameter.standard_name] = True
ndCounts[parameter.standard_name] = nds
colors[parameter.standard_name] = parameter.id
strides[parameter.standard_name] = {}
else:
logger.debug('Parameter name "%s" does not have a standard_name', parameter.name)
pa_units[parameter.name] = unit
is_standard_name[parameter.name] = False
ndCounts[parameter.name] = nds
colors[parameter.name] = parameter.id
strides[parameter.name] = {}
# Initialize pt dictionary of dictionaries with its keys
if unit not in list(pt.keys()):
logger.debug('Initializing pt[%s] = {}', unit)
pt[unit] = {}
# Add coordinates keys if asked for from the UI
if self.kwargs['parametertimeplotcoord']:
if 'Longitude' in self.kwargs['parametertimeplotcoord']:
pt[LONGITUDE_UNITS] = {}
pa_units['Longitude'] = LONGITUDE_UNITS
strides['Longitude'] = {}
is_standard_name['Longitude'] = False
ndCounts['Longitude'] = 1
if 'Latitude' in self.kwargs['parametertimeplotcoord']:
pt[LATITUDE_UNITS] = {}
pa_units['Latitude'] = LATITUDE_UNITS
strides['Latitude'] = {}
is_standard_name['Latitude'] = False
ndCounts['Latitude'] = 1
if 'Depth' in self.kwargs['parametertimeplotcoord']:
pt[DEPTH_UNITS] = {}
pa_units['Depth'] = DEPTH_UNITS
strides['Depth'] = {}
is_standard_name['Depth'] = False
ndCounts['Depth'] = 1
if 'Time' in self.kwargs['parametertimeplotcoord']:
pt[TIME_UNITS] = {}
pa_units['Time'] = TIME_UNITS
strides['Time'] = {}
is_standard_name['Time'] = False
ndCounts['Time'] = 1
return (pa_units, is_standard_name, ndCounts, pt, colors, strides)
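# Illustrative (assumed) contents of the returned hashes for one platform:
# pa_units = {'sea_water_temperature': 'Celsius', 'optical_backscatter': 'm-1'}
# is_standard_name = {'sea_water_temperature': True, 'optical_backscatter': False}
# ndCounts = {'sea_water_temperature': 3, 'optical_backscatter': 1}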
def _get_activity_nominaldepths(self, p):
'''Return hash of starting depths for parameter keyed by activity
'''
plotTimeSeriesActivityDepths = {}
# See if timeSeries plotting is requested for trajectory data, e.g. BEDS
pr_qs = models.ParameterResource.objects.using(self.dbname).filter(parameter__name=p,
resource__name='plotTimeSeriesDepth')
if not pr_qs:
# See if there is one for standard_name
pr_qs = models.ParameterResource.objects.using(self.dbname).filter(parameter__standard_name=p,
resource__name='plotTimeSeriesDepth')
try:
for pr in pr_qs:
logger.debug('pr.parameter.name, pr.resource.value = {}, {}'.format(pr.parameter.name, pr.resource.value))
ars = models.ActivityResource.objects.using(self.dbname).filter(
resource=pr.resource, resource__name='plotTimeSeriesDepth')
# Resource with same value will be one record that may be reused by different
# Activities/Platforms, just blindly fill hash keyed by Activity
for ar in ars:
logger.debug('ar.activity.name = {}'.format(ar.activity.name))
plotTimeSeriesActivityDepths[ar.activity] = pr.resource.value
except ObjectDoesNotExist:
# Likely database loaded before plotTimeSeriesDepth was added to ActivityResource, quietly use first value
for act in self.qs:
plotTimeSeriesActivityDepths[act] = pr_qs[0].resource.value
return plotTimeSeriesActivityDepths
def _append_coords_to_pt(self, qs_mp, pt, pa_units, a, stride, units_dict, strides):
'''
Add coordinates to pt dictionary of dictionaries, making sure to append only once.
Only called if self.kwargs['parametertimeplotcoord'], and needs to be called once per request
'''
# Order by nominal depth first so that strided access collects data correctly from each depth
pt_qs_mp = qs_mp.order_by('measurement__nominallocation__depth', 'measurement__instantpoint__timevalue')[::stride]
logger.debug(f'Adding coordinates for a.name = {a.name}')
for mp in pt_qs_mp:
if mp['datavalue'] is None:
continue
tv = mp['measurement__instantpoint__timevalue']
ems = int(1000 * to_udunits(tv, 'seconds since 1970-01-01'))
if 'Longitude' in self.kwargs['parametertimeplotcoord']:
units = LONGITUDE_UNITS
an_nd = f"{units} - Longitude - {a.name}"
units_dict[units] = 'Longitude'
strides['Longitude'][a.name] = stride
try:
pt[units][an_nd].append((ems, mp['measurement__geom'].x))
except KeyError:
pt[units][an_nd] = []
pt[units][an_nd].append((ems, mp['measurement__geom'].x))
if 'Latitude' in self.kwargs['parametertimeplotcoord']:
units = LATITUDE_UNITS
an_nd = f"{units} - Latitude - {a.name}"
units_dict[units] = 'Latitude'
strides['Latitude'][a.name] = stride
try:
pt[units][an_nd].append((ems, mp['measurement__geom'].y))
except KeyError:
pt[units][an_nd] = []
pt[units][an_nd].append((ems, mp['measurement__geom'].y))
if 'Depth' in self.kwargs['parametertimeplotcoord']:
units = DEPTH_UNITS
an_nd = f"{units} - Depth - {a.name}"
units_dict[units] = 'Depth'
strides['Depth'][a.name] = stride
try:
pt[units][an_nd].append((ems, mp['measurement__depth']))
except KeyError:
pt[units][an_nd] = []
pt[units][an_nd].append((ems, mp['measurement__depth']))
if 'Time' in self.kwargs['parametertimeplotcoord']:
units = TIME_UNITS
an_nd = f"{units} - Time - {a.name}"
units_dict[units] = 'Time'
strides['Time'][a.name] = stride
try:
pt[units][an_nd].append((ems, ems))
except KeyError:
pt[units][an_nd] = []
pt[units][an_nd].append((ems, ems))
return pt, units_dict, strides
def _getParameterTimeFromMP(self, qs_mp, pt, pa_units, a, p, is_standard_name, stride, a_nds, units_dict, strides, save_mp_for_plot=True):
'''
Return hash of time series measuredparameter data with specified stride
'''
# Order by nominal depth first so that strided access collects data correctly from each depth
pt_qs_mp = qs_mp.order_by('measurement__nominallocation__depth', 'measurement__instantpoint__timevalue')[::stride]
logger.debug('Adding time series of parameter = %s in key = %s', p, pa_units[p])
for mp in pt_qs_mp:
if mp['datavalue'] is None:
continue
tv = mp['measurement__instantpoint__timevalue']
ems = int(1000 * to_udunits(tv, 'seconds since 1970-01-01'))
nd = mp['measurement__nominallocation__depth']
if nd:
an_nd = "%s - %s - %s @ %s" % (pa_units[p], p, a.name, nd,)
elif a in a_nds:
try:
an_nd = "%s - %s - %s starting @ %s m" % (pa_units[p], p, a.name, a_nds[a],)
except KeyError:
# Likely data from a load before plotTimeSeriesDepth was added to ActivityResource
an_nd = "%s - %s - %s starting @ ? m" % (pa_units[p], p, a.name)
else:
an_nd = "%s - %s - %s" % (pa_units[p], p, a.name)
if save_mp_for_plot:
try:
pt[pa_units[p]][an_nd].append((ems, mp['datavalue']))
except KeyError:
pt[pa_units[p]][an_nd] = []
pt[pa_units[p]][an_nd].append((ems, mp['datavalue']))
return pt, units_dict, strides
def _getParameterTimeFromAP(self, pt, pa_units, a, p):
'''
Return hash of time series min and max values for the specified activity and parameter. Used when the duration
of an activity spans less than one pixel of the flot plot area, as can occur for short event data sets
such as from Benthic Event Detector deployments.
'''
aps = models.ActivityParameter.objects.using(self.dbname).filter(activity=a, parameter__name=p).values('min', 'max')
if not aps:
aps = models.ActivityParameter.objects.using(self.dbname).filter(activity=a,
parameter__standard_name=p).values('min', 'max')
start_ems = int(1000 * to_udunits(a.startdate, 'seconds since 1970-01-01'))
end_ems = int(1000 * to_udunits(a.enddate, 'seconds since 1970-01-01'))
pt[pa_units[p]][a.name] = [[start_ems, aps[0]['min']], [end_ems, aps[0]['max']]]
return pt
def _parameterInSelection(self, p, is_standard_name, parameterType=MEASUREDINSITU):
'''
Return True if parameter name is in the UI selection, either from constraints other than
direct selection or if specifically selected in the UI.
'''
# Coordinates are always in the selection
if p in ('Longitude', 'Latitude', 'Depth', 'Time'):
return True
isInSelection = False
if is_standard_name[p]:
if p in [parms[1] for parms in self.getParameters(parameterType)]:
isInSelection = True
else:
if p in [parms[0] for parms in self.getParameters(parameterType)]:
isInSelection = True
if not isInSelection:
if self.kwargs['measuredparametersgroup']:
if p in self.kwargs['measuredparametersgroup']:
isInSelection = True
else:
isInSelection = False
return isInSelection
def _buildParameterTime(self, pa_units, is_standard_name, ndCounts, pt, strides, pt_qs_mp):
'''
Build structure of timeseries/timeseriesprofile parameters organized by units
'''
PIXELS_WIDE = 800 # Approximate pixel width of parameter-time-flot window
units = {}
# Check if only coord(s) in pa_units
only_coords_flag = False
save_mp_for_plot = True
if not set(pa_units.keys()) - set(('Longitude', 'Latitude', 'Depth', 'Time')):
only_coords_flag = True
# Build units hash of parameter names for labeling axes in flot
for pcount, (p, u) in enumerate(list(pa_units.items())):
logger.debug('is_standard_name = %s. p, u = %s, %s', is_standard_name, p, u)
if not self._parameterInSelection(p, is_standard_name):
logger.debug('Parameter is not in selection')
continue
if p in ('Longitude', 'Latitude', 'Depth', 'Time'):
units[u] = p
else:
try:
units[u] = units[u] + ' ' + p
except KeyError:
units[u] = p
# Apply either parameter name or standard_name to MeasuredParameter and Activity query sets
if is_standard_name[p]:
qs_mp = pt_qs_mp.filter(parameter__standard_name=p)
qs_awp = self.qs.filter(activityparameter__parameter__standard_name=p)
elif only_coords_flag:
# Choose a dummy Parameter and mark for not plotting so that we can collect coordinates
dummy_parm = self.getParameters()[0][0]
logger.info(f"Only coords selected, using {dummy_parm} to go through MPs to get coords")
qs_mp = pt_qs_mp.filter(parameter__name=dummy_parm)
qs_awp = self.qs.filter(activityparameter__parameter__name=dummy_parm)
save_mp_for_plot = False
else:
qs_mp = pt_qs_mp.filter(parameter__name=p)
qs_awp = self.qs.filter(activityparameter__parameter__name=p)
if self.kwargs.get('activitynames'):
qs_awp = qs_awp.filter(name__in=self.kwargs.get('activitynames'))
qs_awp = qs_awp.filter(Q(activityresource__resource__value__icontains='timeseries') |
Q(activityparameter__parameter__parameterresource__resource__name__icontains='plotTimeSeriesDepth')).distinct()
try:
    secondsperpixel = self.kwargs['secondsperpixel'][0]
except (IndexError, KeyError):
    secondsperpixel = 1500 # Default approximates a 2-week view (86400 * 14 / 800 ~ 1512)
logger.debug('--------------------p = %s, u = %s, is_standard_name[p] = %s', p, u, is_standard_name[p])
# Select each time series by Activity and test against secondsperpixel for deciding on min & max or stride selection
if not ndCounts[p]:
ndCounts[p] = 1 # Trajectories with plotTimeSeriesDepth will not have a nominal depth, set to 1 for calculation below
a_nds = self._get_activity_nominaldepths(p)
# See: https://stackoverflow.com/questions/20582966/django-order-by-filter-with-distinct
for acount, a in enumerate(qs_awp.distinct('startdate', 'name').order_by('startdate')):
qs_mp_a = qs_mp.filter(measurement__instantpoint__activity__name=a.name)
ad = (a.enddate-a.startdate)
aseconds = ad.days * 86400 + ad.seconds
logger.debug('a.name = %s, a.startdate = %s, a.enddate %s, aseconds = %s, secondsperpixel = %s',
a.name, a.startdate, a.enddate, aseconds, secondsperpixel)
if float(aseconds) > float(secondsperpixel) or len(self.kwargs.get('platforms')) == 1:
# Multiple points of this activity can be displayed in the flot, get an appropriate stride
logger.debug('PIXELS_WIDE = %s, ndCounts[p] = %s', PIXELS_WIDE, ndCounts[p])
stride = int(round(qs_mp_a.count() / PIXELS_WIDE / ndCounts[p]))
if stride < 1:
stride = 1
logger.debug('Getting timeseries from MeasuredParameter table with stride = %s', stride)
strides[p][a.name] = stride
logger.debug('Adding timeseries for p = %s, a = %s', p, a)
pt, units, strides = self._getParameterTimeFromMP(qs_mp_a, pt, pa_units, a, p, is_standard_name, stride, a_nds, units, strides, save_mp_for_plot)
if self.kwargs['parametertimeplotcoord'] and acount == 0 and pcount == 0:
pt, units, strides = self._append_coords_to_pt(qs_mp, pt, pa_units, a, stride, units, strides)
else:
# Construct just two points for this activity-parameter using the min & max from the AP table
pt = self._getParameterTimeFromAP(pt, pa_units, a, p)
return (pt, units, strides)
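# Worked example of the stride selection above (hypothetical counts): with
# qs_mp_a.count() = 160000, PIXELS_WIDE = 800 and ndCounts[p] = 2 nominal
# depths, stride = int(round(160000 / 800 / 2)) = 100, i.e. roughly one
# plotted point per pixel per depth.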
def getParameterTime(self):
'''
Based on the current selected query criteria for activities, return the associated MeasuredParameter datavalue time series
values as a 2-tuple list inside a 3 level hash of featureType, units, and an "activity__name + nominal depth" key
for each line to be drawn by flot. The MeasuredParameter queries here can be costly. Only perform them if the
UI has requested only 'parametertime' or if the Parameter tab is active in the UI as indicated by 'parametertab' in self.kwargs.
If part of the larger SummaryData request then return the structure with just counts set - a much cheaper query.
'''
pt = {}
units = {}
colors = {}
strides = {}
pa_units = {}
is_standard_name = {}
ndCounts = {}
colors = {}
counts = 0
# Look for platforms that have featureTypes amenable to Parameter time series visualization
for plats in list(self.getPlatforms().values()):
for platform in plats:
if self.kwargs.get('platforms'):
# getPlatforms() includes all Platforms, skip over ones not in the selection
if platform[0] not in self.kwargs.get('platforms'):
continue
timeSeriesParmCount = 0
trajectoryParmCount = 0
logger.debug('Doing cheap query for ' + platform[0] + '...')
if platform[3].lower() == 'timeseriesprofile' or platform[3].lower() == 'timeseries':
# Do cheap query to count the number of timeseriesprofile or timeseries parameters
timeSeriesParmCount = models.Parameter.objects.using(self.dbname).filter(
activityparameter__activity__activityresource__resource__name__iexact='featureType',
activityparameter__activity__activityresource__resource__value__iexact=platform[3].lower(),
activityparameter__activity__platform__name=platform[0],
).count()
elif platform[3].lower() == 'trajectory':
# Count trajectory Parameters for which timeSeries plotting has been requested
trajectoryParmCount = models.Parameter.objects.using(self.dbname).filter(
activityparameter__activity__activityresource__resource__name__iexact='featureType',
activityparameter__activity__activityresource__resource__value__iexact=platform[3].lower(),
parameterresource__resource__name__iexact='plotTimeSeriesDepth',
activityparameter__activity__platform__name=platform[0],
).count()
counts += timeSeriesParmCount + trajectoryParmCount
if counts and (self.kwargs.get('parametertimeplotid') or 'parametertimeplotcoord' in self.kwargs):
if 'parametertime' in self.kwargs['only'] or self.kwargs['parametertab']:
# Initialize structure organized by units for parameters left in the selection
logger.debug('Calling self._collectParameters() with platform = %s', platform)
pa_units, is_standard_name, ndCounts, pt, colors, strides = self._collectParameters(platform, pt,
pa_units, is_standard_name, ndCounts, strides, colors)
logger.debug('Done, counts = {}'.format(counts))
if pa_units:
# The base MeasuredParameter query set for existing UI selections
if not self.mpq.qs_mp:
self.mpq.buildMPQuerySet(*self.args, **self.kwargs)
self.mpq.initialQuery = self.initialQuery
# Perform more expensive query: start with qs_mp_no_parm version of the MeasuredParameter query set
pt_qs_mp = self.mpq.qs_mp_no_parm
logger.debug('Before self._buildParameterTime: pt = %s', list(pt.keys()))
pt, units, strides = self._buildParameterTime(pa_units, is_standard_name, ndCounts, pt, strides, pt_qs_mp)
logger.debug('After self._buildParameterTime: pt = %s', list(pt.keys()))
return({'pt': pt, 'units': units, 'counts': counts, 'colors': colors, 'strides': strides})
def getSampleDepthTime(self):
'''
Based on the current selected query criteria for activities, return the associated SampleDepth time series
values as a 2-tuple list. The name similarity to getSimpleDepthTime is pure coincidence.
'''
samples = []
if self.getSampleQS():
qs = self.getSampleQS().values_list(
'instantpoint__timevalue',
'depth',
'instantpoint__activity__name',
'name'
).order_by('instantpoint__timevalue')
if self.kwargs.get('activitynames'):
qs = qs.filter(instantpoint__activity__name__in=self.kwargs.get('activitynames'))
for s in qs:
ems = int(1000 * to_udunits(s[0], 'seconds since 1970-01-01'))
# Kludgy handling of activity names - flot needs 2 items separated by a space to handle sample event clicking
if (s[2].find('_decim') != -1):
label = '%s %s' % (s[2].split('_decim')[0], s[3],) # Lop off '_decim.nc (stride=xxx)' part of name
elif (s[2].find(' ') != -1):
label = '%s %s' % (s[2].split(' ')[0], s[3],) # Lop off everything after a space in the activity name
else:
label = '%s %s' % (s[2], s[3],) # Show entire Activity name & sample name
rec = {'label': label, 'data': [[ems, '%.2f' % s[1]]]}
##logger.debug('Appending %s', rec)
samples.append(rec)
return(samples)
def getSampleDurationDepthTime(self):
'''
Based on the current selected query criteria for activities, return the associated SampleDuration time series
values as a list of 2-tuples. These are like SampleDepthTime, but have a depth time series.
The UI uses a different glyph which is why these are delivered in a separate structure.
The convention for SampleDurations is for one Sample per activity, therefore we can examine the attributes
of the activity to get the start and end time and min and max depths, or the depth time series.
'''
sample_durations = []
try:
nettow = models.SampleType.objects.using(self.dbname).get(name__contains=NETTOW)
except models.SampleType.DoesNotExist:
nettow = None
try:
planktonpump = models.SampleType.objects.using(self.dbname).get(name__contains=PLANKTONPUMP)
except models.SampleType.DoesNotExist:
planktonpump = None
try:
esp_archive = models.SampleType.objects.using(self.dbname).get(name__contains=ESP_FILTERING)
except models.SampleType.DoesNotExist:
esp_archive = None
try:
esp_archive_at = models.ActivityType.objects.using(self.dbname).get(name__contains=ESP_FILTERING)
except models.ActivityType.DoesNotExist:
esp_archive_at = None
# Samples for which activity mindepth and maxdepth are sufficient for simpledepthtime display
if self.getSampleQS() and (nettow or planktonpump):
qs = self.getSampleQS().filter( Q(sampletype=nettow)
| Q(sampletype=planktonpump)
).values_list(
'instantpoint__timevalue',
'depth',
'instantpoint__activity__name',
'name',
'instantpoint__activity__startdate',
'instantpoint__activity__enddate',
'instantpoint__activity__mindepth',
'instantpoint__activity__maxdepth',
).order_by('instantpoint__timevalue')
for s in qs:
s_ems = int(1000 * to_udunits(s[4], 'seconds since 1970-01-01'))
e_ems = int(1000 * to_udunits(s[5], 'seconds since 1970-01-01'))
# Kludgy handling of activity names - flot needs 2 items separated by a space to handle sample event clicking
if (s[2].find('_decim') != -1):
label = '%s %s' % (s[2].split('_decim')[0], s[3],) # Lop off '_decim.nc (stride=xxx)' part of name
elif (s[2].find(' ') != -1):
label = '%s %s' % (s[2].split(' ')[0], s[3],) # Lop off everything after a space in the activity name
else:
label = '%s %s' % (s[2], s[3],) # Show entire Activity name & sample name
try:
rec = {'label': label, 'data': [[s_ems, '%.2f' % s[7]], [e_ems, '%.2f' % s[6]]]}
except TypeError:
# Likely s[6] and s[7] are None
continue
sample_durations.append(rec)
# Long duration Samples for which we use the whole depth time series
if self.getSampleQS() and (esp_archive):
samples = (self.qs.filter(activitytype=esp_archive_at).order_by('name'))
for samp in samples:
sample_number = samp.name.split('_')[-1]
samp_depth_time_series = []
for td in (self.qs.filter(name=samp.name)
.values_list('simpledepthtime__epochmilliseconds',
'simpledepthtime__depth', 'name')
.order_by('simpledepthtime__epochmilliseconds')):
samp_depth_time_series.append([td[0], td[1]])
if ' ' in samp.name:
label = '%s %s' % (samp.name.split(' ')[0], sample_number) # Lop off everything after first space in the activity name
else:
label = '%s %s' % (samp.name, sample_number) # Show entire Activity name & sample name
sample_durations.append({'label': label, 'data': samp_depth_time_series})
return(sample_durations)
def getActivityParameterHistograms(self):
'''
Based on the current selected query criteria for activities, return the associated histograms of the selected
parameters as a list of hashes, one hash per parameter with pairs of binlo and bincount for flot to make bar charts.
Ordered in a somewhat complicated nested structure of hashes of hashes that permits the jQuery client to properly
color and plot the data.
'''
aphHash = {}
pUnits = {}
showAllParameterValuesFlag = getShow_All_Parameter_Values(self.kwargs)
showSigmatParameterValuesFlag = getShow_Sigmat_Parameter_Values(self.kwargs)
showStandardnameParameterValuesFlag = getShow_StandardName_Parameter_Values(self.kwargs)
for pa in models.Parameter.objects.using(self.dbname).all():
# Apply (negative) logic on whether to continue with creating histograms based on checkboxes checked in the queryUI
if not showAllParameterValuesFlag:
if not showStandardnameParameterValuesFlag:
if not showSigmatParameterValuesFlag:
continue
elif pa.standard_name != 'sea_water_sigma_t':
continue
elif not pa.standard_name:
continue
histList = {}
binwidthList = {}
platformList = {}
activityList = {}
# Collect histograms organized by activity and platform names. The SQL execution is sequential, a query
# is executed for each parameter and here we organize by platform and activity.
for aph in self.getActivityParameterHistogramsQS().select_related().filter(
activityparameter__parameter=pa).values('activityparameter__activity__name',
'activityparameter__activity__platform__name', 'binlo', 'binhi', 'bincount').order_by(
'activityparameter__activity__platform__name', 'activityparameter__activity__name', 'binlo'):
# Save histogram data by activity name
if np.isnan(aph['binlo']) or np.isnan(aph['binhi']):
continue
try:
histList[aph['activityparameter__activity__name']].append([aph['binlo'], aph['bincount']])
except KeyError:
# First time seeing this activity name, create a list and add the first histogram point
histList[aph['activityparameter__activity__name']] = []
histList[aph['activityparameter__activity__name']].append([aph['binlo'], aph['bincount']])
binwidthList[aph['activityparameter__activity__name']] = aph['binhi'] - aph['binlo']
platformList[aph['activityparameter__activity__name']] = []
platformList[aph['activityparameter__activity__name']].append(aph['activityparameter__activity__platform__name'])
##logger.debug('pa.name = %s, aname = %s', pa.name, aph['activityparameter__activity__name'])
# Unwind the platformList to get activities by platform name
for an, pnList in list(platformList.items()):
##logger.debug('an = %s, pnList = %s', an, pnList)
for pn in pnList:
try:
activityList[pn].append(an)
except KeyError:
activityList[pn] = []
activityList[pn].append(an)
# Build the final data structure organized by platform -> activity
plHash = {}
for plat in list(activityList.keys()):
##logger.debug('plat = %s', plat)
for an in activityList[plat]:
try:
plHash[plat][an] = {'binwidth': binwidthList[an], 'hist': histList[an]}
except KeyError:
plHash[plat] = {}
plHash[plat][an] = {'binwidth': binwidthList[an], 'hist': histList[an]}
# Assign histogram data to the hash keyed by parameter name
if plHash:
aphHash[pa.name] = plHash
pUnits[pa.name] = pa.units
# Make RGBA colors from the hex colors - needed for opacity in flot bars
rgbas = {}
for plats in list(self.getPlatforms().values()):
for p in plats:
r,g,b = (p[2][:2], p[2][2:4], p[2][4:])
rgbas[p[0]] = 'rgba(%d, %d, %d, 0.4)' % (int(r,16), int(g,16), int(b,16))
return {'histdata': aphHash, 'rgbacolors': rgbas, 'parameterunits': pUnits}
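# Worked example of the hex-to-RGBA conversion above: for a platform color
# p[2] = 'ff8800' the slices give r, g, b = ('ff', '88', '00') and the
# emitted string is 'rgba(255, 136, 0, 0.4)'.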
def _build_mpq_queryset(self):
'''Factored out method used to construct query for getting data values to
produce png images of data in the selection - e.g. for Flot and X3D IndexedFaceSets
'''
# Check for parameter-plot-radio button being selected, which inherently ensures that a
# single parameter name is selected for plotting. Modifies member items (self.mpq and friends) as a side effect.
parameterID = None
platformName = None
contourparameterID = None # parameter for Contour plots
contourplatformName = None
parameterGroups = []
contourparameterGroups = []
logger.debug('self.kwargs = %s', self.kwargs)
if self.request.GET.get('showplatforms', False):
# Allow for platform animation without selecting a parameterplot
self.mpq.buildMPQuerySet(*self.args, **self.kwargs)
if 'parameterplot' in self.kwargs:
if self.kwargs['parameterplot'][0]:
parameterID = self.kwargs['parameterplot'][0]
parameter = models.Parameter.objects.using(self.request.META['dbAlias']).get(id=parameterID)
parameterGroups = getParameterGroups(self.request.META['dbAlias'], parameter)
if self.kwargs['parameterplot'][1]:
platformName = self.kwargs['parameterplot'][1]
if parameterID:
self.mpq.buildMPQuerySet(*self.args, **self.kwargs)
if 'parametercontourplot' in self.kwargs:
if self.kwargs['parametercontourplot'][0]:
contourparameterID = self.kwargs['parametercontourplot'][0]
contourparameter = models.Parameter.objects.using(self.request.META['dbAlias']).get(id=contourparameterID)
contourparameterGroups = getParameterGroups(self.request.META['dbAlias'], contourparameter)
if self.kwargs['parametercontourplot'][1]:
contourplatformName = self.kwargs['parametercontourplot'][1]
self.kwargs['parameterplot_id'] = contourparameterID
if contourparameterID:
self.contour_mpq.buildMPQuerySet(*self.args, **self.kwargs)
return parameterID, platformName, contourparameterID, contourplatformName, parameterGroups, contourparameterGroups
def _get_plot_min_max(self, parameterID, contourparameterID):
min_max = self.getParameterMinMax(pid=parameterID)['plot']
if not parameterID and contourparameterID:
min_max = self.getParameterMinMax(pid=contourparameterID)['plot']
return min_max
def getParameterDatavaluePNG(self):
'''
Called when user interface has selected one Parameter for plotting, in which case
produce a depth-time section plot for overlay on the flot plot. Return a png image
file name for inclusion in the AJAX response.
'''
parameterID, platformName, contourparameterID, contourplatformName, parameterGroups, contourparameterGroups = self._build_mpq_queryset()
if not (parameterID or platformName or contourparameterID or contourplatformName):
    return
min_max = self._get_plot_min_max(parameterID, contourparameterID)
if not min_max:
return None, None, 'Cannot plot Parameter'
if SAMPLED in parameterGroups:
# The fourth item should be for SampledParameter if that is the group of the Parameter
cp = MeasuredParameter(self.kwargs, self.request, self.qs, self.mpq.qs_sp_no_order, self.contour_mpq.qs_sp_no_order,
min_max, self.getSampleQS(), platformName,
parameterID, parameterGroups, contourplatformName, contourparameterID, contourparameterGroups)
else:
cp = MeasuredParameter(self.kwargs, self.request, self.qs, self.mpq.qs_mp_no_order, self.contour_mpq.qs_mp_no_order,
min_max, self.getSampleQS(), platformName,
parameterID, parameterGroups, contourplatformName, contourparameterID, contourparameterGroups)
return cp.renderDatavaluesNoAxes()
def _combine_sample_platforms(self, platforms):
'''Mainly for LRAUV data: combine <platform>_ESP_filtering or <platform>_Sipper Platform name
with <platform> for creating the image(s) by renderDatavaluesNoAxes()
'''
has_samples = []
combined_platforms = []
for platform in platforms.split(','):
for sample_type in SAMPLE_TYPES:
if platform.endswith(sample_type):
parent_platform = platform.split('_'+sample_type)[0]
if parent_platform in platforms:
has_samples.append(platform)
has_samples.append(parent_platform)
combined_platforms.append(f"{parent_platform},{platform}")
else:
# Possible to have samples without the parent platform
combined_platforms.append(platform)
if platform not in has_samples:
combined_platforms.append(platform)
return combined_platforms
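# Illustrative trace (assuming SAMPLE_TYPES contains 'ESP_filtering'):
#
#     _combine_sample_platforms('daphne,makai_ESP_filtering,tethys,makai')
#     -> ['daphne', 'makai,makai_ESP_filtering', 'tethys']
#
# The Sample platform is paired with its parent and the bare parent entry is
# dropped via the has_samples bookkeeping.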
def getPDV_IFSs(self):
'''Return X3D scene of Parameter DataValue IndexedFaceSets of curtains constructed
from ParameterDatavaluePNG images when contour and 3D data are checked.
'''
x3d_dict = {}
contourFlag = False
if 'showdataas' in self.kwargs:
if self.kwargs['showdataas']:
if self.kwargs['showdataas'][0] == 'contour':
contourFlag = True
if contourFlag and self.kwargs.get('showgeox3dmeasurement'):
# Set a single min_max for all the curtains
parameterID, platformName, contourparameterID, contourplatformName, parameterGroups, contourparameterGroups = self._build_mpq_queryset()
min_max = self._get_plot_min_max(parameterID, contourparameterID)
if not min_max:
return None, None, 'Cannot plot Parameter'
# platformName and contourplatformName are for display purposes and may look like:
# 'daphne,makai_ESP_filtering,tethys,makai'; _combine_sample_platforms() divvies them up for image generation
saved_platforms = self.kwargs['platforms']
for pns in self._combine_sample_platforms(platformName):
# Rebuild query set for just this platform as qs_mp_no_order is an MPQuerySet which has no filter() method
self.kwargs['platforms'] = pns.split(',')
platform_single = self.kwargs['platforms'][0]
# All Activities in the selection, do not include 'special Activities' like LRAUV Mission
for act in self.qs.filter(Q(platform__name=platform_single) & ~Q(activitytype__name=LRAUV_MISSION)):
# Set self.mpq.qs_mp to None to bypass the Singleton nature of MPQuery and have _build_mpq_queryset() build new self.mpq items
self.mpq.qs_mp = None
self.kwargs['activitynames'] = [act.name]
parameterID, platformName, contourparameterID, contourplatformName, parameterGroups, contourparameterGroups = self._build_mpq_queryset()
logger.info(f"Rendering image for pns='{pns}', act.name='{act.name}'")
cp = MeasuredParameter(self.kwargs, self.request, self.qs, self.mpq.qs_mp, self.contour_mpq.qs_mp_no_order,
min_max, self.getSampleQS(), pns,
parameterID, parameterGroups, contourplatformName, contourparameterID, contourparameterGroups)
x3d_items, shape_id_dict = cp.curtainX3D(pns, float(self.request.GET.get('ve', 10)),
int(self.request.GET.get('slice_minutes')))
if x3d_items:
x3d_dict.update(x3d_items)
try:
x3d_dict['shape_id_dict'].update(shape_id_dict)
except KeyError:
x3d_dict['shape_id_dict'] = {}
x3d_dict['shape_id_dict'].update(shape_id_dict)
self.kwargs['platforms'] = saved_platforms
if x3d_dict:
x3d_dict['speedup'] = self._get_speedup({act.platform for act in self.qs})
cycInt = (self.max_end_time - self.min_start_time).total_seconds() / x3d_dict['speedup']
x3d_dict['timesensor'] = PlatformAnimation.timesensor_template.format(cycInt=cycInt)
sec_interval = (cp.x[2] - cp.x[1]) * cp.scale_factor
spaced_ts = np.arange(self.min_start_time.timestamp(), self.max_end_time.timestamp(), sec_interval)
x3d_dict['limits'] = (0, len(spaced_ts))
cp.makeColorBar(cp.colorbarPngFileFullPath, cp.pMinMax)
x3d_dict['colorbar'] = cp.colorbarPngFile
return x3d_dict
def getParameterParameterPNG(self):
'''
If at least the X and Y radio buttons are checked produce a scatter plot for delivery back to the client
'''
plotResults = None
if 'parameterparameter' in self.kwargs:
px = self.kwargs['parameterparameter'][0]
py = self.kwargs['parameterparameter'][1]
pc = self.kwargs['parameterparameter'][3]
if (px and py):
# PQuery is used here so as to combine Measured and Sampled Parameters
if not self.pq.qs_mp:
self.pq.buildPQuerySet(*self.args, **self.kwargs)
# We have enough information to generate a 2D scatter plot
##if not self.pp: # ...png always gets called before ...x3d - unless we change the key names...
pMinMax = { 'x': self.getParameterMinMax(px, percentileAggregateType='extrema')['plot'],
'y': self.getParameterMinMax(py, percentileAggregateType='extrema')['plot'],
'c': self.getParameterMinMax(pc)['plot']}
logger.debug('pMinMax = %s', pMinMax)
if not pMinMax['x'] or not pMinMax['y']:
return '', 'Selected x and y axis parameters are not in filtered selection.'
self.pp = ParameterParameter(self.kwargs, self.request, {'x': px, 'y': py, 'c': pc}, self.mpq, self.pq, pMinMax)
try:
ppPngFile, infoText, sql = self.pp.make2DPlot()
except PPDatabaseException as e:
return None, str(e), e.sql
plotResults = ppPngFile, infoText, sql
return plotResults
def getParameterParameterX3D(self):
'''
If at least the X, Y, and Z radio buttons are checked produce an X3D response for delivery back to the client
'''
x3dDict = None
if 'parameterparameter' in self.kwargs:
px = self.kwargs['parameterparameter'][0]
py = self.kwargs['parameterparameter'][1]
pz = self.kwargs['parameterparameter'][2]
pc = self.kwargs['parameterparameter'][3]
logger.debug('px = %s, py = %s, pz = %s, pc = %s', px, py, pz, pc)
if (px and py and pz):
if not self.pq.qs_mp:
self.pq.buildPQuerySet(*self.args, **self.kwargs)
# We have enough information to generate X3D XML
pMinMax = { 'x': self.getParameterMinMax(px, percentileAggregateType='extrema')['plot'],
'y': self.getParameterMinMax(py, percentileAggregateType='extrema')['plot'],
'z': self.getParameterMinMax(pz, percentileAggregateType='extrema')['plot'],
'c': self.getParameterMinMax(pc)['plot'] }
if not pMinMax['x'] or not pMinMax['y'] or not pMinMax['z']:
return '', 'Selected x, y, z, c Parameters not in filtered selection.'
logger.debug('Instantiating Viz.PropertyPropertyPlots for X3D............................................')
self.pp = ParameterParameter(self.kwargs, self.request, {'x': px, 'y': py, 'z': pz, 'c': pc}, self.mpq, self.pq, pMinMax)
try:
x3dDict = self.pp.makeX3D()
except DatabaseError as e:
return '', e
try:
x3dDict['sql'] += ';'
except TypeError:
return '', 'Selected x, y, z, c Parameters not in filtered selection.'
return x3dDict
def _get_speedup(self, platforms=()):
# Hard-code appropriate speedup for different platforms
speedup = 10
for platform in platforms:
if 'BED' in platform.name.upper():
speedup = 1
# Override speedup if provided by request from UI
if self.kwargs.get('speedup'):
speedup = float(self.kwargs.get('speedup')[0])
return speedup
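# Example of the precedence above (hypothetical values): a selection that
# includes a platform named 'BED03' yields speedup = 1, while a UI request
# with kwargs['speedup'] = ['25'] overrides either default to 25.0.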
def getMeasuredParameterX3D(self):
'''Returns dictionary of X3D elements for rendering by X3DOM.
The dictionary is organized by Platform. The dataValuesX3D() method returns items
organized by Activity and slice_minute Shape slices.
'''
x3d_dict = {}
if self.kwargs.get('showgeox3dmeasurement') and 'parameterplot' in self.kwargs:
# Set a single min_max for coloring all the lines
parameterID, platformName, contourparameterID, contourplatformName, parameterGroups, contourparameterGroups = self._build_mpq_queryset()
min_max = self._get_plot_min_max(parameterID, contourparameterID)
if not min_max:
return x3d_dict
# platformName and contourplatformName are for display purposes and may look like:
# 'daphne,makai_ESP_filtering,tethys,makai'; _combine_sample_platforms() divvies them up to get by-platform querystrings
saved_platforms = self.kwargs['platforms']
saved_activitynames = self.kwargs['activitynames']
min_sec_interval = 10e10
for pns in self._combine_sample_platforms(platformName):
# Rebuild query set for just this platform as qs_mp_no_order is an MPQuerySet which has no filter() method
self.kwargs['platforms'] = pns.split(',')
platform_single = self.kwargs['platforms'][0]
self.min_start_time = datetime.utcnow()
self.max_end_time = datetime.utcfromtimestamp(0)
# All Activities in the selection, do not include 'special Activities' like LRAUV Mission
for act in self.qs.filter(Q(platform__name=platform_single) & ~Q(activitytype__name=LRAUV_MISSION)):
if act.startdate < self.min_start_time:
self.min_start_time = act.startdate
if act.enddate > self.max_end_time:
self.max_end_time = act.enddate
# Set self.mpq.qs_mp to None to bypass the Singleton nature of MPQuery and have _build_mpq_queryset() build new self.mpq items
self.mpq.qs_mp = None
if saved_activitynames and act.name not in saved_activitynames:
continue
self.kwargs['activitynames'] = [act.name]
parameterID, platformName, contourparameterID, contourplatformName, parameterGroups, contourparameterGroups = self._build_mpq_queryset()
logger.info(f"Getting dataValues for pns='{pns}', act.name='{act.name}'")
cp = MeasuredParameter(self.kwargs, self.request, self.qs, self.mpq.qs_mp, self.contour_mpq.qs_mp_no_order,
min_max, self.getSampleQS(), pns,
parameterID, parameterGroups, contourplatformName, contourparameterID, contourparameterGroups)
x3d_items, shape_id_dict = cp.dataValuesX3D(platform_single, float(self.request.GET.get('ve', 10)),
int(self.request.GET.get('slice_minutes', 30)))
if x3d_items:
x3d_dict.update(x3d_items)
try:
x3d_dict['shape_id_dict'].update(shape_id_dict)
except KeyError:
x3d_dict['shape_id_dict'] = {}
x3d_dict['shape_id_dict'].update(shape_id_dict)
try:
sec_interval = (cp.x[1] - cp.x[0]) * cp.scale_factor
except IndexError as e:
logger.warning(f"{e}: Likely no data in this slice.")
continue
if sec_interval < min_sec_interval and sec_interval > 0:
min_sec_interval = sec_interval
self.kwargs['platforms'] = saved_platforms
self.kwargs['activitynames'] = saved_activitynames
if x3d_dict:
x3d_dict['speedup'] = self._get_speedup({act.platform for act in self.qs})
cycInt = (self.max_end_time - self.min_start_time).total_seconds() / x3d_dict['speedup']
x3d_dict['timesensor'] = PlatformAnimation.timesensor_template.format(cycInt=cycInt)
spaced_ts = np.arange(self.min_start_time.timestamp(), self.max_end_time.timestamp(), min_sec_interval)
x3d_dict['limits'] = (0, len(spaced_ts))
cp.makeColorBar(cp.colorbarPngFileFullPath, cp.pMinMax)
x3d_dict['colorbar'] = cp.colorbarPngFile
return x3d_dict
def getPlatformAnimation(self):
'''
Based on the current selected query criteria for activities,
return the associated PlatformAnimation time series of X3D scene graph.
If roll, pitch, and yaw exist as platform standard_names the scene includes orientation
angles; otherwise just the position animation scene is returned.
'''
orientDict = {}
if self.request.GET.get('showplatforms', False):
self.mpq.qs_mp = None
self.kwargs['activitynames'] = []
parameterID, platformName, contourparameterID, contourplatformName, parameterGroups, contourparameterGroups = self._build_mpq_queryset()
# Test if there are any X3D platform models in the selection
platformsHavingModels = {pr.platform for pr in models.PlatformResource.objects.using(
self.dbname).filter(resource__resourcetype__name=X3DPLATFORMMODEL,
platform__in=[a.platform for a in self.qs])}
platforms_trajectories = {ar.activity.platform for ar in models.ActivityResource.objects.using(
self.dbname).filter(resource__name='featureType', resource__value='trajectory',
activity__platform__in=[a.platform for a in self.qs])}
# For detecting non-trajectory BEDS that have rotation data (ANGLE, AXIS_X, AXIS_Y, AXIS_Z)
# This is a weak test (for just 'AXIS_X'), but one with weak consequences as well: at worst an error is
# reported to the UI if the other required Parameters are not present
platforms_rotations = {ar.activity.platform for ar in models.ActivityResource.objects.using(
self.dbname).filter(activity__activityparameter__parameter__name='AXIS_X',
activity__platform__in=[a.platform for a in self.qs])}
platforms_to_animate = platformsHavingModels & (platforms_trajectories | platforms_rotations)
if platforms_to_animate:
# Use qs_mp_no_parm QuerySet as it contains roll, pitch, and yaw values
mppa = PlatformAnimation(platforms_to_animate, self.kwargs,
self.request, self.qs, self.mpq.qs_mp_no_parm)
speedup = self._get_speedup(platforms_to_animate)
# Default vertical exaggeration is 10x and default geoorigin is empty string
orientDict = mppa.platformAnimationDataValuesForX3D(
float(self.request.GET.get('ve', 10)),
self.request.GET.get('geoorigin', ''),
scale=1, speedup=speedup)
return orientDict
def getParameterPlatforms(self):
'''
Return hash of parameter ids (keys) and the platforms (a list) that measured/sampled them
'''
ppHash = {}
pp_qs = (models.ActivityParameter.objects.using(self.dbname)
.filter(activity__in=self.qs)
.values('parameter__id', 'activity__platform__name')
.distinct())
if self.kwargs.get('activitynames'):
pp_qs = pp_qs.filter(activity__name__in=self.kwargs.get('activitynames'))
for ap in pp_qs:
try:
ppHash[ap['parameter__id']].append(ap['activity__platform__name'])
except KeyError:
ppHash[ap['parameter__id']] = []
ppHash[ap['parameter__id']].append(ap['activity__platform__name'])
return ppHash
def getX3DTerrains(self):
'''
Query Resources to get any X3D Terrain information for this Campaign and return as a hash for the STOQS UI to use
'''
x3dtHash = {}
try:
for r in models.Resource.objects.using(self.dbname).filter(resourcetype__name='x3dterrain').all():
try:
x3dtHash[r.uristring][r.name] = r.value
except KeyError:
x3dtHash[r.uristring] = {}
x3dtHash[r.uristring][r.name] = r.value
except DatabaseError as e:
logger.warning('No resourcetype__name of x3dterrain in %s: %s', self.dbname, e)
return x3dtHash
def getX3DPlaybacks(self):
'''
Query Resources to get any X3D Playback information for the Activities remaining in the selection
'''
x3dpHash = {}
try:
for r in models.Resource.objects.using(self.dbname).filter(resourcetype__name='x3dplayback').values(
'uristring', 'name', 'value', 'activityresource__activity__name'):
ms = models.Measurement.objects.using(self.dbname).filter(instantpoint__activity__name=r['activityresource__activity__name'])
try:
x3dpHash[r['uristring']][r['name']] = r['value']
x3dpHash[r['uristring']]['startGeoCoords'] = '%s %s %s' % (ms[0].geom.y, ms[0].geom.x, -ms[0].depth)
except KeyError:
x3dpHash[r['uristring']] = {}
x3dpHash[r['uristring']][r['name']] = r['value']
x3dpHash[r['uristring']]['startGeoCoords'] = '%s %s %s' % (ms[0].geom.y, ms[0].geom.x, -ms[0].depth)
except DatabaseError as e:
logger.warning('No resourcetype__name of x3dplayback in %s: %s', self.dbname, e)
return x3dpHash
def getResources(self):
'''
Query ActivityResources for Resources remaining in Activity selection
'''
netcdfHash = {}
# Simple name/value attributes
logger.debug("Begining to loop though ActivityResource query to build netcdfHash...")
for ar in models.ActivityResource.objects.using(self.dbname).filter(activity__in=self.qs
,resource__name__in=['title', 'summary', 'opendap_url']
).values('activity__platform__name', 'activity__name', 'activity__comment', 'resource__name', 'resource__value'):
try:
netcdfHash[ar['activity__platform__name']][ar['activity__name']][ar['resource__name']] = ar['resource__value']
netcdfHash[ar['activity__platform__name']][ar['activity__name']]['comment'] = ar['activity__comment']
except KeyError:
try:
netcdfHash[ar['activity__platform__name']][ar['activity__name']] = {}
except KeyError:
netcdfHash[ar['activity__platform__name']] = {}
netcdfHash[ar['activity__platform__name']][ar['activity__name']] = {}
netcdfHash[ar['activity__platform__name']][ar['activity__name']][ar['resource__name']] = ar['resource__value']
netcdfHash[ar['activity__platform__name']][ar['activity__name']]['comment'] = ar['activity__comment']
logger.debug("Done building netcdfHash.")
# Quick Look plots
qlHash = {}
logger.debug("Begining to loop though ActivityResource query to build qlHash...")
for ar in models.ActivityResource.objects.using(self.dbname).filter(activity__in=self.qs, resource__resourcetype__name='quick_look').values(
'activity__platform__name', 'activity__name', 'resource__name', 'resource__uristring'):
try:
qlHash[ar['activity__platform__name']][ar['activity__name']][ar['resource__name']] = ar['resource__uristring']
except KeyError:
try:
qlHash[ar['activity__platform__name']][ar['activity__name']] = {}
except KeyError:
qlHash[ar['activity__platform__name']] = {}
qlHash[ar['activity__platform__name']][ar['activity__name']] = {}
qlHash[ar['activity__platform__name']][ar['activity__name']][ar['resource__name']] = ar['resource__uristring']
logger.debug("Done building qlHash.")
# Campaign information
c_hash = {}
logger.debug("Begining to loop though ActivityResource query to build c_hash...")
for cr in models.CampaignResource.objects.using(self.dbname).all():
c_hash[cr.resource.name] = cr.resource.value
logger.debug("Done building c_hash.")
return {'netcdf': netcdfHash, 'quick_look': qlHash, 'campaign': c_hash}
def getAttributes(self):
'''
Query for "Attributes" which are specific ResourceTypes or fields of other classes. Initially for tagged measurements
and for finding comments about Samples, but can encompass any other way a STOQS database may be filtered or searched.
May 2019: Added LRAUV Missions -- shoe-horning into the mplabel scheme developed for machine learning, because it mostly fits.
'''
measurementHash = {}
sources = models.ResourceResource.objects.using(self.dbname).filter(toresource__name=COMMANDLINE
).values_list('fromresource__resourcetype__name', 'toresource__value').distinct()
if sources:
logger.debug('Building commandlines element in measurementHash...')
measurementHash['commandlines'] = dict((s[0], s[1]) for s in sources)
else:
# Check for LRAUV Missions
sources = (models.ResourceResource.objects.using(self.dbname)
.filter(toresource__name=LRAUV_MISSION)
.values_list('fromresource__resourcetype__name', 'toresource__value')
.distinct())
if sources:
logger.debug('Building "syslogs" element in measurementHash...')
measurementHash['commandlines'] = dict((s[0], s[1]) for s in sources)
for mpr in models.MeasuredParameterResource.objects.using(self.dbname).filter(activity__in=self.qs
,resource__name__in=[LABEL, LRAUV_MISSION]).values( 'resource__resourcetype__name', 'resource__value',
'resource__id').distinct().order_by('resource__value'):
# Include all description resources associated with this label
descriptions = ' '.join(models.ResourceResource.objects.using(self.dbname).filter(fromresource__id=mpr['resource__id'],
toresource__name=DESCRIPTION).values_list('toresource__value', flat=True))
try:
measurementHash[mpr['resource__resourcetype__name']].append((mpr['resource__id'], mpr['resource__value'], descriptions))
except KeyError:
measurementHash[mpr['resource__resourcetype__name']] = []
measurementHash[mpr['resource__resourcetype__name']].append((mpr['resource__id'], mpr['resource__value'], descriptions))
logger.debug('Returning from getAttributes with measurementHash = %s', measurementHash)
return {'measurement': measurementHash}
#
# Methods that generate Q objects used to populate the query.
#
def _sampledparametersgroupQ(self, parameterid, fromTable='Activity'):
'''
Build a Q object to be added to the current queryset as a filter. This should
ensure that our result doesn't contain any parameter names that were not selected.
We use id for sampledparametersgroup as the name may contain special characters.
'''
q = Q()
if parameterid is None:
return q
else:
if fromTable == 'Activity':
q = Q(activityparameter__parameter__id__in=parameterid)
elif fromTable == 'Sample':
q = Q(sampledparameter__parameter__id__in=parameterid)
elif fromTable == 'ActivityParameter':
q = Q(parameter__id__in=parameterid)
elif fromTable == 'ActivityParameterHistogram':
q = Q(activityparameter__parameter__id__in=parameterid)
return q
def _measuredparametersgroupQ(self, parameterid, fromTable='Activity'):
'''
Build a Q object to be added to the current queryset as a filter. This should
ensure that our result doesn't contain any parameters that were not selected.
'''
q = Q()
if parameterid is None:
return q
else:
if fromTable == 'Activity':
q = Q(activityparameter__parameter__id__in=parameterid)
elif fromTable == 'Sample':
# Use sub-query to find all Samples from Activities that are in the existing Activity queryset
# Note: must do the monkey patch in __init__() so that Django's django/db/models/sql/query.py
# statement "sql, params = self.get_compiler(DEFAULT_DB_ALIAS).as_sql()" uses the right connection.
# This is not a Django bug according to source code comment at:
# https://github.com/django/django/blob/master/django/db/models/sql/query.py
q = Q(instantpoint__activity__in=self.qs)
elif fromTable == 'ActivityParameter':
# Use sub-query to restrict ActivityParameters to those that are in the list of Activities in the selection
q = Q(activity__in=self.qs)
elif fromTable == 'ActivityParameterHistogram':
# Use sub-query to find all ActivityParameterHistogram from Activities that are in the existing Activity queryset
q = Q(activityparameter__activity__in=self.qs)
return q
def _parameterstandardnameQ(self, parameterstandardname, fromTable='Activity'):
'''
Build a Q object to be added to the current queryset as a filter. This should
ensure that our result doesn't contain any parameter standard_names that were not selected.
'''
q = Q()
if parameterstandardname is None:
return q
else:
if fromTable == 'Activity':
q = Q(activityparameter__parameter__standard_name__in=parameterstandardname)
elif fromTable == 'Sample':
# Use sub-query to find all Samples from Activities that are in the existing Activity queryset
q = Q(instantpoint__activity__in=self.qs)
elif fromTable == 'ActivityParameter':
q = Q(activity__in=self.qs)
elif fromTable == 'ActivityParameterHistogram':
# Use sub-query to find all ActivityParameterHistogram from Activities that are in the existing Activity queryset
q = Q(activityparameter__activity__in=self.qs)
return q
def _platformsQ(self, platforms, fromTable='Activity'):
'''
Build a Q object to be added to the current queryset as a filter. This will ensure that we
only generate the other values/sets for platforms that were selected.
'''
q = Q()
if platforms is None:
return q
else:
if fromTable == 'Activity':
q = Q(platform__name__in=platforms)
elif fromTable == 'Sample':
# Use sub-query to find all Samples from Activities that are in the existing Activity queryset
q = Q(instantpoint__activity__in=self.qs)
elif fromTable == 'ActivityParameter':
q = Q(activity__in=self.qs)
elif fromTable == 'ActivityParameterHistogram':
# Use sub-query to find all ActivityParameterHistogram from Activities that are in the existing Activity queryset
q = Q(activityparameter__activity__in=self.qs)
return q
def _timeQ(self, times, fromTable='Activity'):
'''
Build a Q object to be added to the current queryset as a filter. This ensures that we limit
things down based on the time range selected by the user.
'''
q = Q()
if not times:
return q
if times[0] is not None:
if fromTable == 'Activity':
q = Q(enddate__gte=times[0])
elif fromTable == 'Sample':
q = Q(instantpoint__timevalue__gte=times[0])
elif fromTable == 'ActivityParameter':
q = Q(activity__enddate__gte=times[0])
elif fromTable == 'ActivityParameterHistogram':
q = Q(activityparameter__activity__enddate__gte=times[0])
if times[1] is not None:
if fromTable == 'Activity':
q = q & Q(startdate__lte=times[1])
elif fromTable == 'Sample':
q = q & Q(instantpoint__timevalue__lte=times[1])
elif fromTable == 'ActivityParameter':
q = q & Q(activity__startdate__lte=times[1])
elif fromTable == 'ActivityParameterHistogram':
q = q & Q(activityparameter__activity__startdate__lte=times[1])
return q
def _depthQ(self, depth, fromTable='Activity'):
'''
Build a Q object to be added to the current queryset as a filter. Once again, we want
to make sure that we only generate the "leftover" components based on the selected depth
range.
'''
q = Q()
if not depth:
return q
if depth[0] is not None:
if fromTable == 'Activity':
q = Q(maxdepth__gte=depth[0])
elif fromTable == 'Sample':
q = Q(depth__gte=depth[0])
elif fromTable == 'ActivityParameter':
q = Q(activity__maxdepth__gte=depth[0])
elif fromTable == 'ActivityParameterHistogram':
q = Q(activityparameter__activity__maxdepth__gte=depth[0])
if depth[1] is not None:
if fromTable == 'Activity':
q = q & Q(mindepth__lte=depth[1])
elif fromTable == 'Sample':
q = q & Q(depth__lte=depth[1])
elif fromTable == 'ActivityParameter':
q = q & Q(activity__mindepth__lte=depth[1])
elif fromTable == 'ActivityParameterHistogram':
q = q & Q(activityparameter__activity__mindepth__lte=depth[1])
return q
def _mplabelsQ(self, resourceids, fromTable='Activity'):
'''
Build a Q object to be added to the current queryset as a filter. This will ensure that we
only generate the other values/sets for attributes (initially resources that have names of 'label'
that are MeasuredParameter labels) that were selected.
'''
q = Q()
if not resourceids:
return q
else:
if fromTable == 'Activity':
q = Q(id__in=models.MeasuredParameterResource.objects.using(self.dbname).filter(
resource__id__in=resourceids).values_list('activity__id').distinct())
elif fromTable == 'ActivityParameter':
q = Q(activity__id__in=models.MeasuredParameterResource.objects.using(self.dbname).filter(
resource__id__in=resourceids).values_list('activity__id').distinct())
return q
def _trajectoryQ(self):
'''
Return Q object that is True if the activity is of featureType trajectory
'''
# Restrict selection to Activities that are trajectories. Can have pre CF-1.6 UCDD and CF-1.6 and later metadata.
udcc_q1 = Q(activityresource__resource__name__iexact='thredds_data_type') & Q(activityresource__resource__value__iexact='Trajectory')
udcc_q2 = Q(activityresource__resource__name__iexact='cdm_data_type') & Q(activityresource__resource__value__iexact='trajectory')
udcc_q3 = Q(activityresource__resource__name__iexact='CF%3afeatureType') & Q(activityresource__resource__value__iexact='trajectory')
udcc_q4 = Q(activityresource__resource__name__iexact='CF_featureType') & Q(activityresource__resource__value__iexact='trajectory')
cf16_q = Q(activityresource__resource__name__iexact='featureType') & Q(activityresource__resource__value__iexact='trajectory')
q = (udcc_q1 | udcc_q2 | udcc_q3 | udcc_q4 | cf16_q)
return q
def _timeSeriesQ(self):
'''
Return Q object that is True if the activity is of featureType timeSeries
'''
# Restrict selection to Activities that are timeSeries. Can have pre CF-1.6 UCDD and CF-1.6 and later metadata.
udcc_q1 = Q(activityresource__resource__name__iexact='thredds_data_type') & Q(activityresource__resource__value__iexact='station')
udcc_q2 = Q(activityresource__resource__name__iexact='cdm_data_type') & Q(activityresource__resource__value__iexact='station')
cf16_q = Q(activityresource__resource__name__iexact='featureType') & Q(activityresource__resource__value__iexact='timeSeries')
q = (udcc_q1 | udcc_q2 | cf16_q)
return q
def _timeSeriesProfileQ(self):
'''
Return Q object that is True if the activity is of featureType timeSeriesProfile
'''
# Restrict selection to Activities that are timeSeriesProfiles. Can have pre CF-1.6 UCDD and CF-1.6 and later metadata.
udcc_q1 = Q(activityresource__resource__name__iexact='thredds_data_type') & Q(activityresource__resource__value__iexact='station')
udcc_q2 = Q(activityresource__resource__name__iexact='cdm_data_type') & Q(activityresource__resource__value__iexact='station')
cf16_q = Q(activityresource__resource__name__iexact='featureType') & Q(activityresource__resource__value__iexact='timeSeriesProfile')
q = (udcc_q1 | udcc_q2 | cf16_q)
return q
def _trajectoryProfileQ(self):
'''
Return Q object that is True if the activity is of featureType trajectoryProfile
'''
# Restrict selection to Activities that are trajectoryProfiles - a featureType new in CF-1.6
cf16_q = Q(activityresource__resource__name__iexact='featureType') & Q(activityresource__resource__value__iexact='trajectoryProfile')
q = (cf16_q)
return q
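# A minimal sketch of how these Q-object builders compose (the chaining
# shown here is illustrative, not lifted from this class):
#
#     q = (self._platformsQ(['tethys']) &
#          self._timeQ((start, end)) &
#          self._depthQ((0, 100)) &
#          self._trajectoryQ())
#     qs = models.Activity.objects.using(self.dbname).filter(q)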
#
# Methods to get the query used based on the current Q object.
#
def getSQLWhere(self):
'''
This method will generate a pseudo-query, and then normalize it to a standard SQL query. While for
PostgreSQL this is usually the actual query, we might need to massage it a bit to handle quoting
issues and such. The string representation of the queryset's query attribute gives us the query.
This is really useful when we want to generate a new mapfile based on the current query result. We just want
the WHERE clause of the query, since that's where the predicate exists.
'''
querystring = str(self.qs.query)
return querystring
def getActivityGeoQuery(self, Q_object = None, pointFlag=False):
'''
This method generates a string that can be put into a Mapserver mapfile DATA statement.
It is for returning Activities. If @param pointFlag is True then postgresifySQL() will
deliver the mappoint field as geom, otherwise it will deliver maptrack (trajectory) as geom.
'''
qs = self.qs
# Add any more filters (Q objects) if specified
if Q_object:
qs = qs.filter(Q_object)
if self.kwargs.get('activitynames'):
qs = qs.filter(name__in=self.kwargs.get('activitynames'))
# Query for mapserver
geo_query = 'geom from (%s) as subquery using unique gid using srid=4326' % postgresifySQL(qs.query, pointFlag).rstrip()
return geo_query
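# The returned string has the shape Mapserver expects in a DATA statement,
# e.g. (SQL abbreviated):
#
#     geom from (SELECT ... FROM stoqs_activity WHERE ...) as subquery
#         using unique gid using srid=4326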
def getSampleGeoQuery(self, Q_object = None):
'''
This method generates a string that can be put into a Mapserver mapfile DATA statement.
It is for returning Samples.
'''
qs = self.sample_qs
if not qs:
return ''
# Add any more filters (Q objects) if specified
if Q_object:
qs = self.sample_qs.using(self.dbname).filter(Q_object)
if self.kwargs.get('activitynames'):
qs = qs.filter(instantpoint__activity__name__in=self.kwargs.get('activitynames'))
# Query for mapserver
geo_query = 'geom from (%s) as subquery using unique gid using srid=4326' % postgresifySQL(qs.query, sampleFlag=True)
logger.debug('geo_query = %s', geo_query)
return geo_query
def getSampleExtent(self, geoqueryset, srid=4326):
"""
Accepts a GeoQuerySet and SRID.
Returns the extent as a GEOS object in the Google Maps projection.
The result can be directly passed out for direct use in OpenLayers.
"""
area = geoqueryset.area()
extent = fromstr('MULTIPOINT (%s %s, %s %s)' % geoqueryset.extent(), srid=srid)
ul = extent[0]
lr = extent[1]
dist = ul.distance(lr)
# if the points are all in one location then expand the extent so openlayers
# will zoom to something that is visible
if not dist:
ul.x = ul.x-0.15
ul.y = ul.y+0.15
lr.x = lr.x+0.15
lr.y = lr.y-0.15
extent = MultiPoint(ul,lr)
extent.srid = srid
extent.transform(self.spherical_mercator_srid)
return extent
def getExtent(self, srid=4326, outputSRID=spherical_mercator_srid):
'''
Return GEOSGeometry extent of all the geometry contained in the Activity and Sample geoquerysets.
The result can be passed directly to OpenLayers.
'''
extent = None
# Check all geometry types encountered in Activity GeoQuerySet in priority order
extentList = []
for geom_field in (('maptrack', 'mappoint', 'plannedtrack')):
try:
qs_ext = self.qs.aggregate(Extent(geom_field))
extentList.append(qs_ext[geom_field + '__extent'])
except DatabaseError:
logger.warning('Database %s does not have field %s', self.dbname, geom_field)
except TypeError:
pass
##logger.debug('Field %s is Null in Activity GeoQuerySet: %s', geom_field, str(self.qs) )
# Append the Sample geometries
try:
sqs = self.getSampleQS()
extentList.append(sqs.extent(field_name='geom'))
except:
logger.debug('Could not get an extent for Sample GeoQuerySet')
# Take the union of all geometry types found in Activities and Samples
logger.debug("Collected %d geometry extents from Activities and Samples", len(extentList))
geom_union = None
if extentList:
logger.debug('extentList = %s', extentList)
# Initialize geom_union with first not None extent
for index, ext in enumerate(extentList):
if ext is not None:
geom_union = fromstr('LINESTRING (%s %s, %s %s)' % ext, srid=srid)
break
# Union additional extents
for extent in extentList[index:]:
if extent is not None:
if extent[0] == extent[2] and extent[1] == extent[3]:
logger.debug('Unioning extent = %s as a Point', extent)
geom_union = geom_union.union(Point(*extent[:2], srid=srid))
else:
logger.debug('Unioning extent = %s as a LINESTRING', extent)
geom_union = geom_union.union(fromstr('LINESTRING (%s %s, %s %s)' % extent, srid=srid))
# Aggressive try/excepts done here for better reporting on the production servers
if geom_union:
logger.debug('Final geom_union = %s', geom_union)
else:
logger.exception('geom_union could not be set from extentList = %s', extentList)
return ([], None, None, None)  # match the 4-tuple returned at the end of this method
try:
    geomstr = 'LINESTRING (%s %s, %s %s)' % geom_union.extent
except (TypeError, ValueError):
    logger.exception('Tried to get extent for self.qs.query = %s, but failed. Check the database loader and make sure a geometry type (maptrack or mappoint) is assigned for each activity.', str(self.qs.query))
else:
logger.debug('geomstr = %s', geomstr)
try:
extent = fromstr(geomstr, srid=srid)
except:
logger.exception('Could not get extent for geomstr = %s, srid = %d', geomstr, srid)
# Compute midpoint of extent for use in GeoViewpoint for Virtual Reality (WebVR) viewpoint setting
lon_midpoint = (extent[0][0] + extent[1][0]) / 2.0
lat_midpoint = (extent[0][1] + extent[1][1]) / 2.0
qs = self.qs.aggregate(Max('maxdepth'), Min('mindepth'))
depth_midpoint = (qs['mindepth__min'] + qs['maxdepth__max']) / 2.0
if np.isnan(depth_midpoint):
depth_midpoint = 0.0
try:
extent.transform(outputSRID)
except:
logger.exception('Cannot transform to %s for geomstr = %s, srid = %d', outputSRID, geomstr, srid)
logger.debug('Returning from getExtent() with extent = %s', extent)
return (extent, lon_midpoint, lat_midpoint, depth_midpoint)
|
MBARIMike/stoqs
|
stoqs/utils/STOQSQManager.py
|
Python
|
gpl-3.0
| 136,368
|
[
"NetCDF"
] |
2cd93c9285e606d1e292bcd2cdc2c9467cb95f126048383c9f839df83d1e2d95
|
import ast
import weakref
import sys
import collections
from greentype.utils import is_collection
def decorated_with(node, name):
if not isinstance(node, (ast.ClassDef, ast.FunctionDef)):
raise ValueError('Illegal node type "{}". Should be either class '
'or function definition.'.format(type(node).__name__))
decorators = [attributes_chain_to_name(d) for d in node.decorator_list]
return name in decorators
def attributes_chain_to_name(node):
parts = []
while isinstance(node, ast.Attribute):
parts.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
parts.append(node.id)
return '.'.join(reversed(parts))
else:
return None
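# Usage sketch (standard library only):
#
#     node = ast.parse('a.b.c', mode='eval').body    # an ast.Attribute chain
#     attributes_chain_to_name(node)                 # -> 'a.b.c'
#     attributes_chain_to_name(ast.parse('f()', mode='eval').body)  # -> None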
def node_name(node):
return getattr(node, 'id', None) or getattr(node, 'name', None)
def node_parent(node):
parent = getattr(node, '_parent', None)
return parent() if parent is not None else None
def interlink_ast(root):
parents_stack = []
def transform(node):
if isinstance(node, ast.AST):
if parents_stack:
# TODO: may be better to use weakref here
node._parent = weakref.ref(parents_stack[-1])
# property can't be defined outside of class, because invoked via
# __getattribute__ machinery
# and unfortunately ast.AST can't be patched either ;)
# node.parent = property(fget=lambda x: node._parent())
# node._parent = parents_stack[-1]
else:
node._parent = None
parents_stack.append(node)
for child in ast.iter_child_nodes(node):
transform(child)
parents_stack.pop()
transform(root)
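# Usage sketch: after interlinking, node_parent() walks upward through the
# freshly attached weak references:
#
#     tree = ast.parse('def f():\n    return 42')
#     interlink_ast(tree)
#     func = tree.body[0]                  # the ast.FunctionDef node
#     assert node_parent(func) is tree     # the Module is its parent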
def find_parent(node, cls, stop_cls=ast.Module, strict=True):
    if strict and node is not None:
        node = node_parent(node)
    while node and not isinstance(node, stop_cls):
        if isinstance(node, cls):
            return node
        node = node_parent(node)
    return None
def find_parents(node, cls, stop_cls=ast.Module, strict=True):
if strict and node is not None:
node = node_parent(node)
parents = []
while node and not isinstance(node, stop_cls):
if isinstance(node, cls):
parents.append(node)
node = node_parent(node)
return reversed(parents)
INDENT_SIZE = 2
def format_node(node, include_fields=False):
fields = collections.OrderedDict()
fields['line'] = getattr(node, 'lineno', '<unknown>')
fields['col'] = getattr(node, 'col_offset', '<unknown>')
if include_fields:
for field_name, field in ast.iter_fields(node):
if not isinstance(field, ast.AST) and not is_collection(field):
fields[field_name] = field
formatted_pairs = ['{}={!r}'.format(k, v) for k, v in fields.items()]
return '{}({})'.format(type(node).__name__, ' '.join(formatted_pairs))
def dump_ast(node, indent=0):
if isinstance(node, ast.AST):
first_line = ' ' * indent + format_node(node)
child_lines = []
for name, value in ast.iter_fields(node):
child_indent = ' ' * (indent + INDENT_SIZE)
if is_collection(value):
child_lines.append('{}{}: *'.format(child_indent, name))
child_lines.extend(dump_ast(c, indent + INDENT_SIZE * 2) for c in value)
else:
field_fmt = dump_ast(value, indent + INDENT_SIZE)
child_lines.append('{}{}: {}'.format(child_indent, name, field_fmt.lstrip()))
if child_lines:
return first_line + '\n' + '\n'.join(child_lines)
return first_line
else:
return '{}{!r}'.format(' ' * indent, node)
class DumpVisitor(ast.NodeVisitor):
def __init__(self, increment=INDENT_SIZE):
super(DumpVisitor, self).__init__()
self.increment = increment
self.lines = []
self.indent = 0
def visit(self, node):
self.lines.append(' ' * self.indent + format_node(node, True))
self.indent += self.increment
self.generic_visit(node)
self.indent -= self.increment
def dump(self):
print(self.dumps())
def dumps(self):
return '\n'.join(self.lines)
def main(path):
with open(path) as f:
root_node = ast.parse(f.read(), path)
# print(ast.dump(root_node, include_attributes=True))
print(dump_ast(root_node))
# visitor = DumpVisitor()
# visitor.visit(ast.parse(f.read(), path))
# visitor.dump()
class GeneratorVisitor(object):
def visit(self, node):
if isinstance(node, ast.AST):
method_name = 'visit_' + node.__class__.__name__
for value in getattr(self, method_name, self.generic_visit)(node):
yield value
def generic_visit(self, node):
for node in ast.iter_child_nodes(node):
for value in self.visit(node):
yield value
if __name__ == '__main__':
main(sys.argv[1])
|
east825/green-type
|
greentype/ast_utils.py
|
Python
|
mit
| 5,055
|
[
"VisIt"
] |
f96ed75c590246aa9f97c3afe30166838bde65479522372f7b6521cbda448c5e
|
#! /usr/bin/env python
########################################################################
# $HeadURL: $
########################################################################
__RCSID__ = "$Id: $"
from DIRAC import exit as DIRACExit
from DIRAC.Core.Base import Script
Script.setUsageMessage("""
Clean the given directory or a list of directories by removing it and all the
contained files and subdirectories from the physical storage and from the
file catalogs.
Usage:
  %s <lfn | fileContainingLfns>
""" % Script.scriptName)
Script.parseCommandLine()
import sys,os
from DIRAC.Core.Utilities.List import sortList,randomize
if len(sys.argv) < 2:
Script.showHelp()
DIRACExit( -1 )
else:
inputFileName = sys.argv[1]
if os.path.exists(inputFileName):
inputFile = open(inputFileName,'r')
string = inputFile.read()
lfns = sortList(string.splitlines(),True)
inputFile.close()
else:
lfns = [inputFileName]
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
dm = DataManager()
for lfn in sortList(lfns):
lfn = lfn.strip()
if not lfn: continue
print "Cleaning directory %s ... " % lfn,
sys.stdout.flush()
result = dm.cleanLogicalDirectory( lfn )
if result['OK']:
print 'OK'
else:
print "ERROR: %s" % result['Message']
|
sposs/DIRAC
|
DataManagementSystem/scripts/dirac-dms-clean-directory.py
|
Python
|
gpl-3.0
| 1,311
|
[
"DIRAC"
] |
f0b43774e1f07d51f65e1c729ec4104029eea3785968268555893c3b1325e7d1
|
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Command to extract the dependancy tree for a given package."""
from __future__ import print_function
import json
import portage # pylint: disable=F0401
from parallel_emerge import DepGraphGenerator
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
def FlattenDepTree(deptree, pkgtable=None, parentcpv=None, get_cpe=False):
"""Simplify dependency json.
Turn something like this (the parallel_emerge DepsTree format):
{
"app-admin/eselect-1.2.9": {
"action": "merge",
"deps": {
"sys-apps/coreutils-7.5-r1": {
"action": "merge",
"deps": {},
"deptype": "runtime"
},
...
}
}
}
...into something like this (the cros_extract_deps format):
{
"app-admin/eselect-1.2.9": {
"deps": ["coreutils-7.5-r1"],
"rev_deps": [],
"name": "eselect",
"category": "app-admin",
"version": "1.2.9",
"full_name": "app-admin/eselect-1.2.9",
"action": "merge"
},
"sys-apps/coreutils-7.5-r1": {
"deps": [],
"rev_deps": ["app-admin/eselect-1.2.9"],
"name": "coreutils",
"category": "sys-apps",
"version": "7.5-r1",
"full_name": "sys-apps/coreutils-7.5-r1",
"action": "merge"
}
}
Args:
deptree: The dependency tree.
pkgtable: The package table to update. If None, create a new one.
parentcpv: The parent CPV.
get_cpe: If set True, include CPE in the flattened dependency tree.
Returns:
A flattened dependency tree.
"""
if pkgtable is None:
pkgtable = {}
for cpv, record in deptree.iteritems():
if cpv not in pkgtable:
cat, nam, ver, rev = portage.versions.catpkgsplit(cpv)
pkgtable[cpv] = {'deps': [],
'rev_deps': [],
'name': nam,
'category': cat,
'version': '%s-%s' % (ver, rev),
'full_name': cpv,
'cpes': [],
'action': record['action']}
if get_cpe:
pkgtable[cpv]['cpes'].extend(GetCPEFromCPV(cat, nam, ver))
# If we have a parent, that is a rev_dep for the current package.
if parentcpv:
pkgtable[cpv]['rev_deps'].append(parentcpv)
# If current package has any deps, record those.
for childcpv in record['deps']:
pkgtable[cpv]['deps'].append(childcpv)
# Visit the subtree recursively as well.
FlattenDepTree(record['deps'], pkgtable=pkgtable, parentcpv=cpv,
get_cpe=get_cpe)
return pkgtable
def GetCPEFromCPV(category, package, version):
"""Look up the CPE for a specified Portage package.
Args:
category: The Portage package's category, e.g. "net-misc"
package: The Portage package's name, e.g. "curl"
version: The Portage version, e.g. "7.30.0"
Returns:
A list of CPE Name strings, e.g.
["cpe:/a:curl:curl:7.30.0", "cpe:/a:curl:libcurl:7.30.0"]
"""
equery_cmd = ['equery', 'm', '-U', '%s/%s' % (category, package)]
lines = cros_build_lib.RunCommand(equery_cmd, error_code_ok=True,
print_cmd=False,
redirect_stdout=True).output.splitlines()
# Look for lines like "Remote-ID: cpe:/a:kernel:linux-pam ID: cpe"
# and extract the cpe URI.
cpes = []
for line in lines:
if 'ID: cpe' not in line:
continue
cpes.append('%s:%s' % (line.split()[1], version.replace('_', '')))
# Note that we're assuming we can combine the root of the CPE, taken
# from metadata.xml, and tack on the version number as used by
# Portage, and come up with a legitimate CPE. This works so long as
# Portage and CPE agree on the precise formatting of the version
# number, which they almost always do. The major exception we've
# identified thus far is that our ebuilds have a pattern of inserting
# underscores prior to patchlevels, that neither upstream nor CPE
# use. For example, our code will decide we have
# cpe:/a:todd_miller:sudo:1.8.6_p7 yet the advisories use a format
# like cpe:/a:todd_miller:sudo:1.8.6p7, without the underscore. (CPE
# is "right" in this example, in that it matches www.sudo.ws.)
#
# Removing underscores seems to improve our chances of correctly
# arriving at the CPE used by NVD. However, at the end of the day,
# ebuild version numbers are rev'd by people who don't have "try to
# match NVD" as one of their goals, and there is always going to be
# some risk of minor formatting disagreements at the version number
# level, if not from stray underscores then from something else.
#
# This is livable so long as you do some fuzzy version number
# comparison in your vulnerability monitoring, between what-we-have
# and what-the-advisory-says-is-affected.
return cpes
def ExtractCPEList(deps_list):
cpe_dump = []
for cpv, record in deps_list.iteritems():
if record['cpes']:
name = '%s/%s' % (record['category'], record['name'])
cpe_dump.append({'ComponentName': name,
'Repository': 'cros',
'Targets': sorted(record['cpes'])})
else:
logging.warning('No CPE entry for %s', cpv)
return sorted(cpe_dump, key=lambda k: k['ComponentName'])
def main(argv):
parser = commandline.ArgumentParser(description="""
This extracts the dependency tree for the specified package, and outputs it
to stdout, in a serialized JSON format.""")
parser.add_argument('--board', default=None,
help='The board to use when computing deps.')
parser.add_argument('--format', default='deps',
choices=['deps', 'cpe'],
help='Output either traditional deps or CPE-only JSON.')
parser.add_argument('--output-path', default=None,
help='Write output to the given path.')
known_args, unknown_args = parser.parse_known_args(argv)
lib_argv = []
if known_args.board:
lib_argv += ['--board=%s' % known_args.board]
lib_argv += ['--quiet', '--pretend', '--emptytree']
lib_argv.extend(unknown_args)
deps = DepGraphGenerator()
deps.Initialize(lib_argv)
deps_tree, _deps_info = deps.GenDependencyTree()
deps_list = FlattenDepTree(deps_tree, get_cpe=(known_args.format == 'cpe'))
if known_args.format == 'cpe':
deps_list = ExtractCPEList(deps_list)
deps_output = json.dumps(deps_list, sort_keys=True, indent=2)
if known_args.output_path:
with open(known_args.output_path, 'w') as f:
f.write(deps_output)
else:
print(deps_output)
|
guorendong/iridium-browser-ubuntu
|
third_party/chromite/scripts/cros_extract_deps.py
|
Python
|
bsd-3-clause
| 6,727
|
[
"VisIt"
] |
9dfd84f6a6c4326630434e276ca8c488425b3d52f84c2cd45ca09e5f02cf6da1
|
# Copyright 2007 by Tiago Antao <tiagoantao@gmail.com>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
import os
from Bio.PopGen import GenePop
from Bio.PopGen.GenePop import FileParser
import Bio.PopGen.FDist
# Quite a few utility functions could be done (like remove pop,
# add locus, etc...). The recommended strategy is convert back
# and forth from/to GenePop and use GenePop Utils
def convert_genepop_to_fdist(gp_rec, report_pops = None):
"""Converts a GenePop record to a FDist one.
Parameters:
gp_rec - Genepop Record (either standard or big)
Returns:
FDist record.
"""
if hasattr(gp_rec, "populations"):
return _convert_genepop_to_fdist(gp_rec)
else:
return _convert_genepop_to_fdist_big(gp_rec, report_pops)
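# Hedged usage sketch: a standard record parsed with GenePop.read() exposes a
# .populations attribute and takes the in-memory path; records from the
# large-file parser lack it and are streamed through the _big variant, e.g.
#   rec = GenePop.read(open('sample.gen'))   # 'sample.gen' is hypothetical
#   fd_rec = convert_genepop_to_fdist(rec)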
def _convert_genepop_to_fdist(gp_rec):
"""Converts a standard GenePop record to a FDist one.
Parameters:
gp_rec - Genepop Record (Standard)
Returns:
FDist record.
"""
fd_rec = Bio.PopGen.FDist.Record()
fd_rec.data_org = 0
fd_rec.num_loci = len(gp_rec.loci_list)
fd_rec.num_pops = len(gp_rec.populations)
for lc_i in range(len(gp_rec.loci_list)):
alleles = []
pop_data = []
for pop_i in range(len(gp_rec.populations)):
for indiv in gp_rec.populations[pop_i]:
for al in indiv[1][lc_i]:
if al is not None and al not in alleles:
alleles.append(al)
alleles.sort() #Dominance requires this
#here we go again (necessary...)
for pop_i in range(len(gp_rec.populations)):
allele_counts = {}
for indiv in gp_rec.populations[pop_i]:
for al in indiv[1][lc_i]:
if al is not None:
count = allele_counts.get(al, 0)
allele_counts[al] = count + 1
allele_array = [] #We need the same order as in alleles
for allele in alleles:
allele_array.append(allele_counts.get(allele, 0))
pop_data.append(allele_array)
#if lc_i==3:
# print alleles, allele_counts#, pop_data
fd_rec.loci_data.append((len(alleles), pop_data))
return fd_rec
def _convert_genepop_to_fdist_big(gp_rec, report_pops = None):
"""Converts a big GenePop record to a FDist one.
Parameters:
gp_rec - Genepop Record (Big)
Returns:
FDist record.
"""
fd_rec = Bio.PopGen.FDist.Record()
fd_rec.data_org = 1
fd_rec.num_loci = len(gp_rec.loci_list)
num_loci = len(gp_rec.loci_list)
loci = []
for i in range(num_loci):
loci.append(set())
pops = []
work_rec = FileParser.read(gp_rec.fname)
lParser = work_rec.get_individual()
def init_pop():
my_pop = []
for i in range(num_loci):
my_pop.append({})
return my_pop
curr_pop = init_pop()
num_pops = 1
if report_pops:
report_pops(num_pops)
while lParser:
if lParser != True:
for loci_pos in range(num_loci):
for al in lParser[1][loci_pos]:
if al is not None:
loci[loci_pos].add(al)
curr_pop[loci_pos][al]= curr_pop[loci_pos].get(al,0)+1
else:
pops.append(curr_pop)
num_pops += 1
if report_pops:
report_pops(num_pops)
curr_pop = init_pop()
lParser = work_rec.get_individual()
work_rec._handle.close() #TODO - Needs a proper fix
pops.append(curr_pop)
fd_rec.num_pops = num_pops
for loci_pos in range(num_loci):
alleles = list(loci[loci_pos])
alleles.sort()
loci_rec = [len(alleles), []]
for pop in pops:
pop_rec = []
for allele in alleles:
pop_rec.append(pop[loci_pos].get(allele, 0))
loci_rec[1].append(pop_rec)
fd_rec.loci_data.append(tuple(loci_rec))
return fd_rec
def _convert_genepop_to_fdist_big_old(gp_rec, report_loci = None):
"""Converts a big GenePop record to a FDist one.
Parameters:
gp_rec - Genepop Record (Big)
Returns:
FDist record.
"""
fd_rec = Bio.PopGen.FDist.Record()
def countPops(rec):
f2 = FileParser.read(rec.fname)
popCnt = 1
while f2.skip_population():
popCnt += 1
return popCnt
fd_rec.data_org = 0
fd_rec.num_loci = len(gp_rec.loci_list)
work_rec0 = FileParser.read(gp_rec.fname)
fd_rec.num_pops = countPops(work_rec0)
num_loci = len(gp_rec.loci_list)
for lc_i in range(num_loci):
if report_loci:
report_loci(lc_i, num_loci)
work_rec = FileParser.read(gp_rec.fname)
work_rec2 = FileParser.read(gp_rec.fname)
alleles = []
pop_data = []
lParser = work_rec.get_individual()
while lParser:
if lParser != True:
for al in lParser[1][lc_i]:
if al is not None and al not in alleles:
alleles.append(al)
lParser = work_rec.get_individual()
#here we go again (necessary...)
alleles.sort()
def process_pop(pop_data, alleles, allele_counts):
allele_array = [] #We need the same order as in alleles
for allele in alleles:
allele_array.append(allele_counts.get(allele, 0))
pop_data.append(allele_array)
lParser = work_rec2.get_individual()
allele_counts = {}
for allele in alleles:
allele_counts[allele] = 0
allele_counts[None]=0
while lParser:
if lParser == True:
process_pop(pop_data, alleles, allele_counts)
allele_counts = {}
for allele in alleles:
allele_counts[allele] = 0
allele_counts[None]=0
else:
for al in lParser[1][lc_i]:
allele_counts[al] += 1
lParser = work_rec2.get_individual()
process_pop(pop_data, alleles, allele_counts)
fd_rec.loci_data.append((len(alleles), pop_data))
return fd_rec
def approximate_fst(desired_fst, simulated_fst, parameter_fst,
max_run_fst = 1, min_run_fst = 0, limit = 0.005):
"""Calculates the next Fst attempt in order to approximate a
desired Fst.
"""
if abs(simulated_fst - desired_fst) < limit:
return parameter_fst, max_run_fst, min_run_fst
if simulated_fst > desired_fst:
max_run_fst = parameter_fst
next_parameter_fst = (min_run_fst + parameter_fst)/2
else:
min_run_fst = parameter_fst
next_parameter_fst = (max_run_fst + parameter_fst)/2
return next_parameter_fst, max_run_fst, min_run_fst
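# Worked sketch with hypothetical numbers: one bisection step of the search.
# With desired_fst=0.1, simulated_fst=0.15 and parameter_fst=0.2 the simulation
# overshoots, so the upper bound shrinks to 0.2 and the next attempt becomes
# (0 + 0.2) / 2:
#   >>> approximate_fst(0.1, 0.15, 0.2)
#   (0.1, 0.2, 0)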
|
bryback/quickseq
|
genescript/Bio/PopGen/FDist/Utils.py
|
Python
|
mit
| 7,053
|
[
"Biopython"
] |
580f7c17a2a0e091f103b96423d5f7999f6c930f1f80173f3c6e0bba99bd31c1
|
"""
# Notes:
- This simulation seeks to emulate the COBAHH benchmark simulations of (Brette
et al. 2007) using the Brian2 simulator for speed benchmark comparison to
DynaSim. However, this simulation does NOT include synapses, for better
comparison to Figure 5 of (Goodman and Brette, 2008) - although it uses the
COBAHH model of (Brette et al. 2007), not CUBA.
- The time taken to simulate will be indicated in the stdout log file
'~/batchdirs/brian_benchmark_COBAHH_nosyn_32000/pbsout/brian_benchmark_COBAHH_nosyn_32000.out'
- Note that this code has been slightly modified from the original (Brette et
al. 2007) benchmarking code, available here on ModelDB:
https://senselab.med.yale.edu/modeldb/showModel.cshtml?model=83319
in order to work with version 2 of the Brian simulator (aka Brian2), and also
modified to change the model being benchmarked, etc.
# References:
- Brette R, Rudolph M, Carnevale T, Hines M, Beeman D, Bower JM, et al.
Simulation of networks of spiking neurons: A review of tools and strategies.
Journal of Computational Neuroscience 2007;23:349–98.
doi:10.1007/s10827-007-0038-6.
- Goodman D, Brette R. Brian: a simulator for spiking neural networks in Python.
Frontiers in Neuroinformatics 2008;2. doi:10.3389/neuro.11.005.2008.
"""
from brian2 import *
# Parameters
cells = 32000
defaultclock.dt = 0.01*ms
area = 20000*umetre**2
Cm = (1*ufarad*cmetre**-2) * area
gl = (5e-5*siemens*cmetre**-2) * area
El = -60*mV
EK = -90*mV
ENa = 50*mV
g_na = (100*msiemens*cmetre**-2) * area
g_kd = (30*msiemens*cmetre**-2) * area
VT = -63*mV
# # Time constants
# taue = 5*ms
# taui = 10*ms
# # Reversal potentials
# Ee = 0*mV
# Ei = -80*mV
# we = 6*nS # excitatory synaptic weight
# wi = 67*nS # inhibitory synaptic weight
# The model
eqs = Equations('''
dv/dt = (gl*(El-v)-
g_na*(m*m*m)*h*(v-ENa)-
g_kd*(n*n*n*n)*(v-EK))/Cm : volt
dm/dt = alpha_m*(1-m)-beta_m*m : 1
dn/dt = alpha_n*(1-n)-beta_n*n : 1
dh/dt = alpha_h*(1-h)-beta_h*h : 1
alpha_m = 0.32*(mV**-1)*(13*mV-v+VT)/
(exp((13*mV-v+VT)/(4*mV))-1.)/ms : Hz
beta_m = 0.28*(mV**-1)*(v-VT-40*mV)/
(exp((v-VT-40*mV)/(5*mV))-1)/ms : Hz
alpha_h = 0.128*exp((17*mV-v+VT)/(18*mV))/ms : Hz
beta_h = 4./(1+exp((40*mV-v+VT)/(5*mV)))/ms : Hz
alpha_n = 0.032*(mV**-1)*(15*mV-v+VT)/
(exp((15*mV-v+VT)/(5*mV))-1.)/ms : Hz
beta_n = .5*exp((10*mV-v+VT)/(40*mV))/ms : Hz
''')
# dv/dt = (gl*(El-v)+ge*(Ee-v)+gi*(Ei-v)-
# dge/dt = -ge*(1./taue) : siemens
# dgi/dt = -gi*(1./taui) : siemens
P = NeuronGroup(cells, model=eqs, threshold='v>-20*mV', refractory=3*ms,
method='euler')
proportion=int(0.8*cells)
Pe = P[:proportion]
Pi = P[proportion:]
# Ce = Synapses(Pe, P, on_pre='ge+=we')
# Ci = Synapses(Pi, P, on_pre='gi+=wi')
# Ce.connect(p=0.98)
# Ci.connect(p=0.98)
# Initialization
P.v = 'El + (randn() * 5 - 5)*mV'
# P.ge = '(randn() * 1.5 + 4) * 10.*nS'
# P.gi = '(randn() * 12 + 20) * 10.*nS'
# Record a few traces
trace = StateMonitor(P, 'v', record=[1, 10, 100])
totaldata = StateMonitor(P, 'v', record=True)
run(0.5 * second, report='text')
# plot(trace.t/ms, trace[1].v/mV)
# plot(trace.t/ms, trace[10].v/mV)
# plot(trace.t/ms, trace[100].v/mV)
# xlabel('t (ms)')
# ylabel('v (mV)')
# show()
# print("Saving TC cell voltages!")
# numpy.savetxt("foo_totaldata.csv", totaldata.v/mV, delimiter=",")
|
asoplata/dynasim-benchmark-brette-2007
|
output/Brian2/brian2_benchmark_COBAHH_nosyn_32000/brian2_benchmark_COBAHH_nosyn_32000.py
|
Python
|
gpl-3.0
| 3,353
|
[
"Brian"
] |
8a38da863d067a51432cb76e82fe60cddaf57d1b20aa2679e5e5eb05c266d356
|
# coding=utf8
#
# Copyright 2013 Dreamlab Onet.pl
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 3.0.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, visit
#
# http://www.gnu.org/licenses/lgpl.txt
#
import pprint
from rmock.tools import len_trim_dict
from rmock.core.call import Call
from rmock.core.subset import ParamsSubset
from rmock.core.subset import DictSubset
from rmock.config import get_config
conf = get_config()
class RmockFunctionProxy(object):
def __init__(self, parent_rmock, funcname):
self.args = None
self.kwargs = None
self.with_params = False
self.funcname = funcname
self.parent_rmock = parent_rmock
self.rmock_data = self.parent_rmock.get_rmock_data()
def assert_called(self):
if self.calls_count == 0:
raise AssertionError("%s not called at all" % self.funcname)
def assert_called_n_times(self, n):
if self.calls_count != n:
raise AssertionError("%s called %s times (%s expected)"
% (self.funcname, self.calls_count, n))
def assert_called_once(self):
self.assert_called_n_times(1)
def assert_called_with(self, *args, **kwargs):
args, kwargs = self._get_call_params(args, kwargs)
func_calls = self.calls
if not func_calls:
raise AssertionError("%s not called at all" % self.funcname)
expected_calls = (args, kwargs)
if expected_calls not in func_calls:
raise AssertionError("invalid %s call;\n expected: %s;\n got: %s" % (
self.funcname,
pprint.pformat(len_trim_dict(expected_calls)),
pprint.pformat(len_trim_dict(map(Call._to_tuple, func_calls))))
)
def assert_called_once_with(self, *args, **kwargs):
self.assert_called_with(*args, **kwargs)
self.assert_called_once()
def assert_has_calls(self, calls, check_call_count=True):
if check_call_count and len(calls) != len(self.calls):
raise AssertionError("%s called invalid number of times; expected: %s;\n got: %s"
% (self.funcname, len(calls), len(self.calls)))
for call in calls:
self.assert_called_with(*call.args, **call.kwargs)
def assert_not_called(self, *args, **kwargs):
call_count = len(self.calls)
if call_count != 0:
raise AssertionError("%s was unexpectedly called (%s times)" %
(self.funcname, call_count))
def assert_not_called_with(self, *args, **kwargs):
func_calls = self.calls
expected_calls = (args, kwargs)
call_count = func_calls.count(expected_calls)
if call_count != 0:
raise AssertionError("%s was unexpectedly called with desired arguments (%s times)" %
(self.funcname, call_count))
@property
def calls(self):
return self.rmock_data.get_calls(self.funcname)
@property
def calls_count(self):
return len(self.calls)
@property
def called(self):
return len(self.calls) > 0
@property
def return_value(self):
return self.rmock_data.get_result(self.funcname, self.args, self.kwargs)
@return_value.setter
def return_value(self, result):
if self.with_params:
self.rmock_data.set_result_with_params(self.funcname, result, self.args, self.kwargs)
else:
self.rmock_data.set_result(self.funcname, result)
side_effect = return_value
def __call__(self, *args, **kwargs):
self.with_params = True
self.args, self.kwargs = self._get_call_params(args, kwargs)
return self
def _get_call_params(self, args, kwargs):
if len(args) == 1 and not kwargs and isinstance(args[0], ParamsSubset):
sbs = args[0]
return sbs.args, DictSubset(sbs.kwargs)
else:
return args, kwargs
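# Hypothetical usage sketch (values invented for illustration): the proxy can
# pin a result globally or only for specific call parameters, then verify the
# recorded calls:
#   proxy.return_value = 42            # result for any call
#   proxy('a', x=1).return_value = 7   # result only for these arguments
#   proxy.assert_called_once_with('a', x=1)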
|
tikan/rmock
|
src/rmock/core/function.py
|
Python
|
lgpl-3.0
| 4,624
|
[
"VisIt"
] |
9213dada4adf5d6b9c93c9d0cc8e40c07690aed1ec172ce3d3fece620e83974a
|
'''
Created on Sept 22, 2017
Convert IPW TOPO files to netCDF
@author: Micah prime
'''
import pandas as pd
import numpy as np
from smrf import ipw
import netCDF4 as nc
from datetime import datetime
from matplotlib import pyplot as plt
fp_output = '/home/micahsandusky/Code/workdir/test_ipw_convert/tuol_topo.nc'
dem = '/data/blizzard/Tuolumne/aso-wy13/data/smrf_outputs/input_backup/tuolx_dem_50m.ipw'
mask = '/data/blizzard/Tuolumne/aso-wy13/data/smrf_outputs/input_backup/tuolx_hetchy_mask_50m.ipw'
veg_type = '/data/blizzard/Tuolumne/aso-wy13/data/smrf_outputs/input_backup/tuolx_vegnlcd_50m.ipw'
veg_height = '/data/blizzard/Tuolumne/aso-wy13/data/smrf_outputs/input_backup/tuolx_vegheight_50m.ipw'
veg_k = '/data/blizzard/Tuolumne/aso-wy13/data/smrf_outputs/input_backup/tuolx_vegk_50m.ipw'
veg_tau = '/data/blizzard/Tuolumne/aso-wy13/data/smrf_outputs/input_backup/tuolx_vegtau_50m.ipw'
u = 4246192
v = 238672
du = -50
dv = 50
units = 'm'
csys = 'UTM'
nx = 1374
ny = 1339
nbits = 16
print "convert all .ipw output files to netcdf topo files"
# create the x,y vectors
x = v + dv*np.arange(nx)
y = u + du*np.arange(ny)
# create nc file
nc_topo = nc.Dataset(fp_output, 'w',
format='NETCDF4', clobber=False)
#===============================================================================
# NetCDF TOPO image
#===============================================================================
s = {}
s['name'] = ['veg_height','veg_type','mask','dem','veg_tau','veg_k']
s['file'] = [veg_height, veg_type, mask, dem, veg_tau, veg_k]
s['units'] = ['m','-','-','m','-','-']
s['description'] =['Roughness height of vegetation','NLCD 2011 Classification','Basin mask',
'Basin DEM','Vegetation optical transmissivity coefficient',
'Vegetation solar extinction coefficient']
s['long_name'] =['Vegetation height','Vegetation type','Mask',
'Digital Elevation Map','Optical transmissivity',
'Solar extinction']
# create the dimensions
nc_topo.createDimension('y',ny)
nc_topo.createDimension('x',nx)
# create some variables
nc_topo.createVariable('y', 'f', 'y')
nc_topo.createVariable('x', 'f', 'x')
nc_topo.variables['x'][:] = x
nc_topo.variables['y'][:] = y
# snow image
for i,v in enumerate(s['name']):
nc_topo.createVariable(v, 'f', ['y','x'], chunksizes=(10,10))
setattr(nc_topo.variables[v], 'units', s['units'][i])
setattr(nc_topo.variables[v], 'description', s['description'][i])
setattr(nc_topo.variables[v], 'long_name', s['long_name'][i])
#===============================================================================
# open ipw file, and add to netCDF
#===============================================================================
for f, var in zip(s['file'], s['name']):
# Read the IPW file
i = ipw.IPW(f)
# assign to netcdf
tmp = i.bands[0].data
plt.imshow(tmp)
plt.colorbar()
plt.show()
nc_topo.variables[var][:] = i.bands[0].data
nc_topo.sync()
#===============================================================================
# set attributes
#===============================================================================
# the y variable attributes
nc_topo.variables['y'].setncattr(
'units',
'meters')
nc_topo.variables['y'].setncattr(
'description',
'UTM, north south')
nc_topo.variables['y'].setncattr(
'long_name',
'y coordinate')
# the x variable attributes
nc_topo.variables['x'].setncattr(
'units',
'meters')
nc_topo.variables['x'].setncattr(
'description',
'UTM, east west')
nc_topo.variables['x'].setncattr(
'long_name',
'x coordinate')
# define some global attributes
nc_topo.setncattr_string('Conventions', 'CF-1.6')
nc_topo.setncattr_string('dateCreated', datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
nc_topo.setncattr_string('history', '[{}] Create netCDF4 file'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
nc_topo.setncattr_string('institution',
'USDA Agricultural Research Service, Northwest Watershed Research Center')
# close file
nc_topo.close()
|
USDA-ARS-NWRC/AWSF
|
examples/make_topo_ncdf.py
|
Python
|
gpl-3.0
| 4,185
|
[
"NetCDF"
] |
c3d1643b0b9768f0c578d711909b8c7b85b2c2d009116bd8849ee962deda37c3
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that all files contain proper licensing information."""
import json
import optparse
import os.path
import subprocess
import sys
def PrintUsage():
print """Usage: python checklicenses.py [--root <root>] [tocheck]
--root Specifies the repository root. This defaults to "../.." relative
to the script file. This will be correct given the normal location
of the script in "<root>/tools/checklicenses".
--ignore-suppressions Ignores path-specific license whitelist. Useful when
trying to remove a suppression/whitelist entry.
tocheck Specifies the directory, relative to root, to check. This defaults
to "." so it checks everything.
Examples:
python checklicenses.py
python checklicenses.py --root ~/chromium/src third_party"""
WHITELISTED_LICENSES = [
'Anti-Grain Geometry',
'Apache (v2.0)',
'Apache (v2.0) BSD (2 clause)',
'Apache (v2.0) GPL (v2)',
'Apple MIT', # https://fedoraproject.org/wiki/Licensing/Apple_MIT_License
'APSL (v2)',
'APSL (v2) BSD (4 clause)',
'BSD',
'BSD (2 clause)',
'BSD (2 clause) ISC',
'BSD (2 clause) MIT/X11 (BSD like)',
'BSD (3 clause)',
'BSD (3 clause) GPL (v2)',
'BSD (3 clause) ISC',
'BSD (3 clause) LGPL (v2 or later)',
'BSD (3 clause) LGPL (v2.1 or later)',
'BSD (3 clause) MIT/X11 (BSD like)',
'BSD (4 clause)',
'BSD-like',
# TODO(phajdan.jr): Make licensecheck not print BSD-like twice.
'BSD-like MIT/X11 (BSD like)',
'BSL (v1.0)',
'FreeType (BSD like)',
'FreeType (BSD like) with patent clause',
'GPL (v2) LGPL (v2.1 or later)',
'GPL (v2 or later) with Bison parser exception',
'GPL (v2 or later) with libtool exception',
'GPL (v3 or later) with Bison parser exception',
'GPL with Bison parser exception',
'Independent JPEG Group License',
'ISC',
'LGPL (unversioned/unknown version)',
'LGPL (v2)',
'LGPL (v2 or later)',
'LGPL (v2.1)',
'LGPL (v2.1 or later)',
'LGPL (v3 or later)',
'MIT/X11 (BSD like)',
'MIT/X11 (BSD like) LGPL (v2.1 or later)',
'MPL (v1.0) LGPL (v2 or later)',
'MPL (v1.1)',
'MPL (v1.1) BSD (3 clause) GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) BSD (3 clause) LGPL (v2.1 or later)',
'MPL (v1.1) BSD-like',
'MPL (v1.1) BSD-like GPL (unversioned/unknown version)',
'MPL (v1.1) BSD-like GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) GPL (v2)',
'MPL (v1.1) GPL (v2) LGPL (v2 or later)',
'MPL (v1.1) GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) GPL (unversioned/unknown version)',
'MPL (v1.1) LGPL (v2 or later)',
'MPL (v1.1) LGPL (v2.1 or later)',
'MPL (v2.0)',
'Ms-PL',
'Public domain',
'Public domain BSD',
'Public domain BSD (3 clause)',
'Public domain BSD-like',
'Public domain LGPL (v2.1 or later)',
'libpng',
'zlib/libpng',
'SGI Free Software License B',
'University of Illinois/NCSA Open Source License (BSD like)',
('University of Illinois/NCSA Open Source License (BSD like) '
'MIT/X11 (BSD like)'),
]
PATH_SPECIFIC_WHITELISTED_LICENSES = {
'base/third_party/icu': [ # http://crbug.com/98087
'UNKNOWN',
],
# http://code.google.com/p/google-breakpad/issues/detail?id=450
'breakpad/src': [
'UNKNOWN',
],
'chrome/common/extensions/docs/examples': [ # http://crbug.com/98092
'UNKNOWN',
],
'courgette/third_party/bsdiff_create.cc': [ # http://crbug.com/98095
'UNKNOWN',
],
'native_client': [ # http://crbug.com/98099
'UNKNOWN',
],
'native_client/toolchain': [
'BSD GPL (v2 or later)',
'BSD (2 clause) GPL (v2 or later)',
'BSD (3 clause) GPL (v2 or later)',
'BSL (v1.0) GPL',
'BSL (v1.0) GPL (v3.1)',
'GPL',
'GPL (unversioned/unknown version)',
'GPL (v2)',
'GPL (v2 or later)',
'GPL (v3.1)',
'GPL (v3 or later)',
],
'third_party/WebKit': [
'UNKNOWN',
],
# http://code.google.com/p/angleproject/issues/detail?id=217
'third_party/angle': [
'UNKNOWN',
],
# http://crbug.com/222828
# http://bugs.python.org/issue17514
'third_party/chromite/third_party/argparse.py': [
'UNKNOWN',
],
# http://crbug.com/326117
# https://bitbucket.org/chrisatlee/poster/issue/21
'third_party/chromite/third_party/poster': [
'UNKNOWN',
],
# http://crbug.com/333508
'third_party/clang_format/script': [
'UNKNOWN',
],
'third_party/devscripts': [
'GPL (v2 or later)',
],
'third_party/expat/files/lib': [ # http://crbug.com/98121
'UNKNOWN',
],
'third_party/ffmpeg': [
'GPL',
'GPL (v2)',
'GPL (v2 or later)',
'UNKNOWN', # http://crbug.com/98123
],
'third_party/fontconfig': [
# https://bugs.freedesktop.org/show_bug.cgi?id=73401
'UNKNOWN',
],
'third_party/freetype2': [ # http://crbug.com/177319
'UNKNOWN',
],
'third_party/hunspell': [ # http://crbug.com/98134
'UNKNOWN',
],
'third_party/iccjpeg': [ # http://crbug.com/98137
'UNKNOWN',
],
'third_party/icu': [ # http://crbug.com/98301
'UNKNOWN',
],
'third_party/lcov': [ # http://crbug.com/98304
'UNKNOWN',
],
'third_party/lcov/contrib/galaxy/genflat.pl': [
'GPL (v2 or later)',
],
'third_party/libc++/trunk/include/support/solaris': [
# http://llvm.org/bugs/show_bug.cgi?id=18291
'UNKNOWN',
],
'third_party/libc++/trunk/src/support/solaris/xlocale.c': [
# http://llvm.org/bugs/show_bug.cgi?id=18291
'UNKNOWN',
],
'third_party/libc++/trunk/test': [
# http://llvm.org/bugs/show_bug.cgi?id=18291
'UNKNOWN',
],
'third_party/libevent': [ # http://crbug.com/98309
'UNKNOWN',
],
'third_party/libjingle/source/talk': [ # http://crbug.com/98310
'UNKNOWN',
],
'third_party/libjpeg_turbo': [ # http://crbug.com/98314
'UNKNOWN',
],
'third_party/libpng': [ # http://crbug.com/98318
'UNKNOWN',
],
# The following files lack license headers, but are trivial.
'third_party/libusb/src/libusb/os/poll_posix.h': [
'UNKNOWN',
],
'third_party/libvpx/source': [ # http://crbug.com/98319
'UNKNOWN',
],
'third_party/libxml': [
'UNKNOWN',
],
'third_party/libxslt': [
'UNKNOWN',
],
'third_party/lzma_sdk': [
'UNKNOWN',
],
'third_party/mesa/src': [
'GPL (v2)',
'GPL (v3 or later)',
'MIT/X11 (BSD like) GPL (v3 or later) with Bison parser exception',
'UNKNOWN', # http://crbug.com/98450
],
'third_party/modp_b64': [
'UNKNOWN',
],
'third_party/openmax_dl/dl' : [
'Khronos Group',
],
'third_party/openssl': [ # http://crbug.com/98451
'UNKNOWN',
],
'third_party/ots/tools/ttf-checksum.py': [ # http://code.google.com/p/ots/issues/detail?id=2
'UNKNOWN',
],
'third_party/molokocacao': [ # http://crbug.com/98453
'UNKNOWN',
],
'third_party/npapi/npspy': [
'UNKNOWN',
],
'third_party/ocmock/OCMock': [ # http://crbug.com/98454
'UNKNOWN',
],
'third_party/ply/__init__.py': [
'UNKNOWN',
],
'third_party/protobuf': [ # http://crbug.com/98455
'UNKNOWN',
],
# http://crbug.com/222831
# https://bitbucket.org/eliben/pyelftools/issue/12
'third_party/pyelftools': [
'UNKNOWN',
],
'third_party/scons-2.0.1/engine/SCons': [ # http://crbug.com/98462
'UNKNOWN',
],
'third_party/simplejson': [
'UNKNOWN',
],
'third_party/skia': [ # http://crbug.com/98463
'UNKNOWN',
],
'third_party/snappy/src': [ # http://crbug.com/98464
'UNKNOWN',
],
'third_party/smhasher/src': [ # http://crbug.com/98465
'UNKNOWN',
],
'third_party/speech-dispatcher/libspeechd.h': [
'GPL (v2 or later)',
],
'third_party/sqlite': [
'UNKNOWN',
],
# http://crbug.com/334668
# MIT license.
'tools/swarming_client/third_party/httplib2': [
'UNKNOWN',
],
# http://crbug.com/334668
# Apache v2.0.
'tools/swarming_client/third_party/oauth2client': [
'UNKNOWN',
],
# https://github.com/kennethreitz/requests/issues/1610
'tools/swarming_client/third_party/requests': [
'UNKNOWN',
],
'third_party/swig/Lib/linkruntime.c': [ # http://crbug.com/98585
'UNKNOWN',
],
'third_party/talloc': [
'GPL (v3 or later)',
'UNKNOWN', # http://crbug.com/98588
],
'third_party/tcmalloc': [
'UNKNOWN', # http://crbug.com/98589
],
'third_party/tlslite': [
'UNKNOWN',
],
'third_party/webdriver': [ # http://crbug.com/98590
'UNKNOWN',
],
# https://github.com/html5lib/html5lib-python/issues/125
# https://github.com/KhronosGroup/WebGL/issues/435
'third_party/webgl/src': [
'UNKNOWN',
],
'third_party/webrtc': [ # http://crbug.com/98592
'UNKNOWN',
],
'third_party/xdg-utils': [ # http://crbug.com/98593
'UNKNOWN',
],
'third_party/yasm/source': [ # http://crbug.com/98594
'UNKNOWN',
],
'third_party/zlib/contrib/minizip': [
'UNKNOWN',
],
'third_party/zlib/trees.h': [
'UNKNOWN',
],
'tools/emacs': [ # http://crbug.com/98595
'UNKNOWN',
],
'tools/gyp/test': [
'UNKNOWN',
],
'tools/python/google/__init__.py': [
'UNKNOWN',
],
'tools/stats_viewer/Properties/AssemblyInfo.cs': [
'UNKNOWN',
],
'tools/symsrc/pefile.py': [
'UNKNOWN',
],
'tools/telemetry/third_party/pyserial': [
# https://sourceforge.net/p/pyserial/feature-requests/35/
'UNKNOWN',
],
'v8/test/cctest': [ # http://crbug.com/98597
'UNKNOWN',
],
}
def check_licenses(options, args):
# Figure out which directory we have to check.
if len(args) == 0:
# No directory to check specified, use the repository root.
start_dir = options.base_directory
elif len(args) == 1:
# Directory specified. Start here. It's supposed to be relative to the
# base directory.
start_dir = os.path.abspath(os.path.join(options.base_directory, args[0]))
else:
# More than one argument, we don't handle this.
PrintUsage()
return 1
print "Using base directory:", options.base_directory
print "Checking:", start_dir
print
licensecheck_path = os.path.abspath(os.path.join(options.base_directory,
'third_party',
'devscripts',
'licensecheck.pl'))
licensecheck = subprocess.Popen([licensecheck_path,
'-l', '100',
'-r', start_dir],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = licensecheck.communicate()
if options.verbose:
print '----------- licensecheck stdout -----------'
print stdout
print '--------- end licensecheck stdout ---------'
if licensecheck.returncode != 0 or stderr:
print '----------- licensecheck stderr -----------'
print stderr
print '--------- end licensecheck stderr ---------'
print "\nFAILED\n"
return 1
used_suppressions = set()
errors = []
for line in stdout.splitlines():
filename, license = line.split(':', 1)
filename = os.path.relpath(filename.strip(), options.base_directory)
# All files in the build output directory are generated one way or another.
# There's no need to check them.
if filename.startswith('out/'):
continue
# For now we're just interested in the license.
license = license.replace('*No copyright*', '').strip()
# Skip generated files.
if 'GENERATED FILE' in license:
continue
if license in WHITELISTED_LICENSES:
continue
if not options.ignore_suppressions:
matched_prefixes = [
prefix for prefix in PATH_SPECIFIC_WHITELISTED_LICENSES
if filename.startswith(prefix) and
license in PATH_SPECIFIC_WHITELISTED_LICENSES[prefix]]
if matched_prefixes:
used_suppressions.update(set(matched_prefixes))
continue
errors.append({'filename': filename, 'license': license})
if options.json:
with open(options.json, 'w') as f:
json.dump(errors, f)
if errors:
for error in errors:
print "'%s' has non-whitelisted license '%s'" % (
error['filename'], error['license'])
print "\nFAILED\n"
print "Please read",
print "http://www.chromium.org/developers/adding-3rd-party-libraries"
print "for more info how to handle the failure."
print
print "Please respect OWNERS of checklicenses.py. Changes violating"
print "this requirement may be reverted."
# Do not print unused suppressions so that above message is clearly
# visible and gets proper attention. Too much unrelated output
# would be distracting and make the important points easier to miss.
return 1
print "\nSUCCESS\n"
if not len(args):
unused_suppressions = set(
PATH_SPECIFIC_WHITELISTED_LICENSES.iterkeys()).difference(
used_suppressions)
if unused_suppressions:
print "\nNOTE: unused suppressions detected:\n"
print '\n'.join(unused_suppressions)
return 0
def main():
default_root = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..'))
option_parser = optparse.OptionParser()
option_parser.add_option('--root', default=default_root,
dest='base_directory',
help='Specifies the repository root. This defaults '
'to "../.." relative to the script file, which '
'will normally be the repository root.')
option_parser.add_option('-v', '--verbose', action='store_true',
default=False, help='Print debug logging')
option_parser.add_option('--ignore-suppressions',
action='store_true',
default=False,
help='Ignore path-specific license whitelist.')
option_parser.add_option('--json', help='Path to JSON output file')
options, args = option_parser.parse_args()
return check_licenses(options, args)
if '__main__' == __name__:
sys.exit(main())
|
boundarydevices/android_external_chromium_org
|
tools/checklicenses/checklicenses.py
|
Python
|
bsd-3-clause
| 15,039
|
[
"Galaxy"
] |
27890a2594cc82937e5ce9804d9b1cc4f6a276857fc518479afd6a7d55c564b4
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urllib_parse_urlparse
from ..utils import (
ExtractorError,
parse_iso8601,
qualities,
)
class SRGSSRIE(InfoExtractor):
_VALID_URL = r'(?:https?://tp\.srgssr\.ch/p(?:/[^/]+)+\?urn=urn|srgssr):(?P<bu>srf|rts|rsi|rtr|swi):(?:[^:]+:)?(?P<type>video|audio):(?P<id>[0-9a-f\-]{36}|\d+)'
_ERRORS = {
'AGERATING12': 'To protect children under the age of 12, this video is only available between 8 p.m. and 6 a.m.',
'AGERATING18': 'To protect children under the age of 18, this video is only available between 11 p.m. and 5 a.m.',
# 'ENDDATE': 'For legal reasons, this video was only available for a specified period of time.',
'GEOBLOCK': 'For legal reasons, this video is only available in Switzerland.',
'LEGAL': 'The video cannot be transmitted for legal reasons.',
'STARTDATE': 'This video is not yet available. Please try again later.',
}
def _get_tokenized_src(self, url, video_id, format_id):
sp = compat_urllib_parse_urlparse(url).path.split('/')
token = self._download_json(
'http://tp.srgssr.ch/akahd/token?acl=/%s/%s/*' % (sp[1], sp[2]),
video_id, 'Downloading %s token' % format_id, fatal=False) or {}
auth_params = token.get('token', {}).get('authparams')
if auth_params:
url += '?' + auth_params
return url
def get_media_data(self, bu, media_type, media_id):
media_data = self._download_json(
'http://il.srgssr.ch/integrationlayer/1.0/ue/%s/%s/play/%s.json' % (bu, media_type, media_id),
media_id)[media_type.capitalize()]
if media_data.get('block') and media_data['block'] in self._ERRORS:
raise ExtractorError('%s said: %s' % (
self.IE_NAME, self._ERRORS[media_data['block']]), expected=True)
return media_data
def _real_extract(self, url):
bu, media_type, media_id = re.match(self._VALID_URL, url).groups()
media_data = self.get_media_data(bu, media_type, media_id)
metadata = media_data['AssetMetadatas']['AssetMetadata'][0]
title = metadata['title']
description = metadata.get('description')
created_date = media_data.get('createdDate') or metadata.get('createdDate')
timestamp = parse_iso8601(created_date)
thumbnails = [{
'id': image.get('id'),
'url': image['url'],
} for image in media_data.get('Image', {}).get('ImageRepresentations', {}).get('ImageRepresentation', [])]
preference = qualities(['LQ', 'MQ', 'SD', 'HQ', 'HD'])
formats = []
for source in media_data.get('Playlists', {}).get('Playlist', []) + media_data.get('Downloads', {}).get('Download', []):
protocol = source.get('@protocol')
for asset in source['url']:
asset_url = asset['text']
quality = asset['@quality']
format_id = '%s-%s' % (protocol, quality)
if protocol.startswith('HTTP-HDS') or protocol.startswith('HTTP-HLS'):
asset_url = self._get_tokenized_src(asset_url, media_id, format_id)
if protocol.startswith('HTTP-HDS'):
formats.extend(self._extract_f4m_formats(
asset_url + ('?' if '?' not in asset_url else '&') + 'hdcore=3.4.0',
media_id, f4m_id=format_id, fatal=False))
elif protocol.startswith('HTTP-HLS'):
formats.extend(self._extract_m3u8_formats(
asset_url, media_id, 'mp4', 'm3u8_native',
m3u8_id=format_id, fatal=False))
else:
formats.append({
'format_id': format_id,
'url': asset_url,
'preference': preference(quality),
'ext': 'flv' if protocol == 'RTMP' else None,
})
self._sort_formats(formats)
return {
'id': media_id,
'title': title,
'description': description,
'timestamp': timestamp,
'thumbnails': thumbnails,
'formats': formats,
}
class SRGSSRPlayIE(InfoExtractor):
IE_DESC = 'srf.ch, rts.ch, rsi.ch, rtr.ch and swissinfo.ch play sites'
_VALID_URL = r'https?://(?:(?:www|play)\.)?(?P<bu>srf|rts|rsi|rtr|swissinfo)\.ch/play/(?:tv|radio)/[^/]+/(?P<type>video|audio)/[^?]+\?id=(?P<id>[0-9a-f\-]{36}|\d+)'
_TESTS = [{
'url': 'http://www.srf.ch/play/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5',
'md5': 'da6b5b3ac9fa4761a942331cef20fcb3',
'info_dict': {
'id': '28e1a57d-5b76-4399-8ab3-9097f071e6c5',
'ext': 'mp4',
'upload_date': '20130701',
'title': 'Snowden beantragt Asyl in Russland',
'timestamp': 1372713995,
}
}, {
# No Speichern (Save) button
'url': 'http://www.srf.ch/play/tv/top-gear/video/jaguar-xk120-shadow-und-tornado-dampflokomotive?id=677f5829-e473-4823-ac83-a1087fe97faa',
'md5': '0a274ce38fda48c53c01890651985bc6',
'info_dict': {
'id': '677f5829-e473-4823-ac83-a1087fe97faa',
'ext': 'flv',
'upload_date': '20130710',
'title': 'Jaguar XK120, Shadow und Tornado-Dampflokomotive',
'description': 'md5:88604432b60d5a38787f152dec89cd56',
'timestamp': 1373493600,
},
}, {
'url': 'http://www.rtr.ch/play/radio/actualitad/audio/saira-tujetsch-tuttina-cuntinuar-cun-sedrun-muster-turissem?id=63cb0778-27f8-49af-9284-8c7a8c6d15fc',
'info_dict': {
'id': '63cb0778-27f8-49af-9284-8c7a8c6d15fc',
'ext': 'mp3',
'upload_date': '20151013',
'title': 'Saira: Tujetsch - tuttina cuntinuar cun Sedrun Mustér Turissem',
'timestamp': 1444750398,
},
'params': {
# rtmp download
'skip_download': True,
},
}, {
'url': 'http://www.rts.ch/play/tv/-/video/le-19h30?id=6348260',
'md5': '67a2a9ae4e8e62a68d0e9820cc9782df',
'info_dict': {
'id': '6348260',
'display_id': '6348260',
'ext': 'mp4',
'duration': 1796,
'title': 'Le 19h30',
'description': '',
'uploader': '19h30',
'upload_date': '20141201',
'timestamp': 1417458600,
'thumbnail': r're:^https?://.*\.image',
'view_count': int,
},
'params': {
# m3u8 download
'skip_download': True,
}
}]
def _real_extract(self, url):
bu, media_type, media_id = re.match(self._VALID_URL, url).groups()
# other info can be extracted from url + '&layout=json'
return self.url_result('srgssr:%s:%s:%s' % (bu[:3], media_type, media_id), 'SRGSSR')
|
israeltobias/DownMedia
|
youtube-dl/youtube_dl/extractor/srgssr.py
|
Python
|
gpl-3.0
| 7,163
|
[
"Jaguar"
] |
da822ae8639441cbd453fede5d6257d24b728373c22eecf82ff1cb870750c955
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from Method import *
from RombergIterate import *
import sys, time, datetime, os
if __name__ == "__main__":
rangeOut = False
sair = False
while( not sair):
firstTime = False
os.system('clear')
print bcolors.HEADER + '============================= Romberg =========================' + bcolors.ENDC
print 'Option - Description'
print ' 1 - Solve the problem using Romberg Recursive'
print ' 2 - Solve the problem using Romberg Iterative'
print ' 3 - Solve the problem using Scipy Library'
print ' 4 - Calculate f(x)'
print ' 5 - About'
print ' 0 - Exit'
print ''
option = -7
        while( option < 0 or option > 5):
try:
if rangeOut == True:
print bcolors.FAIL+'Out of range, try again' + bcolors.ENDC
option = int(raw_input('Option: '))
                if option < 0 or option > 5:
rangeOut = True
except Exception,msg:
rangeOut = True
if option == 1:
os.system('clear')
print bcolors.HEADER + '========================= Romberg - Recursive =====================' + bcolors.ENDC
#try:
rombergRecursive()
#except Exception,msg:
# print bcolors.FAIL +'Something is wrong!\n'+ str(msg) + bcolors.ENDC
raw_input('\nPress enter')
elif option == 2:
os.system('clear')
print bcolors.HEADER + '========================= Romberg - Iterative =====================' + bcolors.ENDC
#try:
gaussian = lambda x: -1.47206*(10**-7)*(x**10)+0.0000148524*(x**9)-0.000642464*(x**8)+0.0155672*(x**7)-0.231584*(x**6)+2.17898*(x**5)-12.861*(x**4)+45.434*(x**3)-85.9344*(x**2)+65.5502*(x)
rombergIterate(gaussian,a,b,show=True)
#except Exception,msg:
# print bcolors.FAIL +'Something is wrong!\n'+ str(msg) + bcolors.ENDC
raw_input('\nPress enter')
elif option == 4:
os.system('clear')
try:
print bcolors.HEADER + '===================== Romberg - Calculate ======================' + bcolors.ENDC
result = function(int(raw_input('Your \'x\' to function(x):')))
print 'Result to function(x)=',result
except Exception,msg:
print bcolors.FAIL +'Something is wrong!\n'+ str(msg) + bcolors.ENDC
raw_input('Press enter')
elif option == 3:
os.system('clear')
try:
print bcolors.HEADER + '====================== Romberg - Library =======================' + bcolors.ENDC
print 'Using Scipy Library'
print ''
result = rombergByScipy()
except Exception,msg:
print bcolors.FAIL +'Something is wrong!\n'+ str(msg) + bcolors.ENDC
raw_input('\nPress enter')
elif option == 5:
os.system('clear')
print bcolors.HEADER + '===================== Romberg - Group =====================' + bcolors.ENDC
            print 'Library developed to solve the problem'
            print ' proposed in the Numerical Calculus course work. To learn'
            print ' more about the assignment, please visit Ava/UFPel'
            print ''
            print 'Members:'
print ' André Alba'
print ' Glauco Roberto'
print ' Guilherme Cousin'
raw_input('\nPress enter')
elif option == 0:
os.system('clear')
print bcolors.HEADER + '======================== Romberg - Exit ========================' + bcolors.ENDC
print 'Good Bye!'
sair = True
else:
os.system('clear')
print bcolors.HEADER + '======================= Romberg - Ooops! ======================' + bcolors.ENDC
print bcolors.FAIL +'Something is wrong! Oops!' + bcolors.ENDC
sair = True
|
glaucomunsberg/romberg
|
Principal.py
|
Python
|
gpl-2.0
| 3,458
|
[
"Gaussian"
] |
1def182dc5ddcdeecc4b3bc710b32e6a49adc5f714790a8ce92ef03f060fe6a0
|
#!/usr/bin/env python
#
# Copyright (C) 2015, the ximpol team.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import numpy
from ximpol.srcmodel.roi import xGaussianDisk, xROIModel
from ximpol.srcmodel.spectrum import power_law
from ximpol.srcmodel.polarization import constant
ROI_MODEL = xROIModel(10., 15.)
energy_spectrum = power_law(10., 2.)
polarization_degree = constant(0.5)
polarization_angle = constant(numpy.radians(65.))
src = xGaussianDisk('Gaussian disk', 10., 15., 0.005, energy_spectrum,
polarization_degree, polarization_angle)
ROI_MODEL.add_source(src)
if __name__ == '__main__':
print(ROI_MODEL)
|
lucabaldini/ximpol
|
ximpol/config/gaussian_disk.py
|
Python
|
gpl-3.0
| 1,308
|
[
"Gaussian"
] |
f62c33bf1ebfef772ff253c03cc0ea611003c883c73185d9671be516edd35e60
|
class TreeNode:
def __init__(self, **kwargs):
self.children = dict()
def get(self, key):
return self.children.get(key, None)
def get_children(self, sort=False):
if sort:
return sorted(self.children.values())
return self.children.values()
def add(self, key, node=None, **kwargs):
if key not in self.children:
if node is None:
node = self.__class__(**kwargs)
self.children[key] = node
return self.children[key]
def set(self, key, val, **kwargs):
self.children[key] = val
return self.children[key]
class OrderedTreeNode:
def __init__(self, **kwargs):
self.children = list()
def get(self, index):
return self.children[index]
def get_children(self, sort=False):
if sort:
return sorted(self.children)
return self.children
def add(self, node=None, **kwargs):
if node is None:
node = self.__class__(**kwargs)
self.children.append(node)
return self.children[-1]
def set(self, index, val, **kwargs):
self.children[index] = val
return self.children[index]
class Tree:
def __init__(self, root=None, node_class=TreeNode, **kwargs):
if root is None:
root = node_class(**kwargs)
self.root = root
self.node_class = node_class
def get_children(self, node, sort=False):
return node.get_children(sort=sort)
def dfs(self, action=None, pre_action=None, post_action=None, sort=False):
assert(action is None or (pre_action is None and post_action is None))
if action is not None and pre_action is None and post_action is None:
pre_action = post_action = action
def visit(node, depth=0, parent=None):
if pre_action:
if pre_action(node, depth, parent):
return
for n in self.get_children(node, sort=sort):
visit(n, depth+1, node)
if post_action:
if post_action(node, depth, parent):
return
visit(self.root)
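# Usage sketch (illustrative): build a two-level tree and report each node's
# depth during a depth-first walk; a pre_action that returns a truthy value
# prunes the traversal below that node.
#   t = Tree()
#   t.root.add('a').add('b')
#   t.dfs(pre_action=lambda node, depth, parent: print('depth', depth))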
|
mhozza/string_algorithms
|
string_algorithms/tree.py
|
Python
|
mit
| 2,185
|
[
"VisIt"
] |
8f9b57a35f108602642c0e4eee07a1c287eea7322e44e719214927b09d8fa95b
|
import sys
sys.path.insert(1, "../../../")
import h2o, tests
def distribution_behaviorGBM():
#Log.info("==============================")
#Log.info("Default Behavior - Gaussian")
#Log.info("==============================")
eco = h2o.import_file(path=h2o.locate("smalldata/gbm_test/ecology_model.csv"))
# 0/1 response: expect gaussian
eco_model = h2o.gbm(x=eco[2:13], y=eco["Angaus"])
assert isinstance(eco_model,h2o.model.regression.H2ORegressionModel)
# more than 2 integers for response: expect gaussian
cars = h2o.import_file(path=h2o.locate("smalldata/junit/cars.csv"))
cars_model = h2o.gbm(x=cars[3:7], y=cars["cylinders"])
assert isinstance(cars_model,h2o.model.regression.H2ORegressionModel)
#Log.info("==============================")
#Log.info("Gaussian Behavior")
#Log.info("==============================")
# 0/1 response: expect gaussian
eco_model = h2o.gbm(x=eco[2:13], y=eco["Angaus"], distribution="gaussian")
assert isinstance(eco_model,h2o.model.regression.H2ORegressionModel)
# character response: expect error
try:
eco_model = h2o.gbm(x=eco[1:8], y=eco["Method"], distribution="gaussian")
assert False, "expected an error"
except EnvironmentError:
assert True
#Log.info("==============================")
#Log.info("Bernoulli Behavior")
#Log.info("==============================")
# 0/1 response: expect bernoulli
eco_model = h2o.gbm(x=eco[2:13], y=eco["Angaus"].asfactor(), distribution="bernoulli")
assert isinstance(eco_model,h2o.model.binomial.H2OBinomialModel)
# 2 level character response: expect bernoulli
tree = h2o.import_file(path=h2o.locate("smalldata/junit/test_tree_minmax.csv"))
tree_model = h2o.gbm(x=tree[0:3], y=tree["response"], distribution="bernoulli", min_rows=1)
assert isinstance(tree_model,h2o.model.binomial.H2OBinomialModel)
# more than two integers for response: expect error
try:
cars_mod = h2o.gbm(x=cars[3:7], y=cars["cylinders"], distribution="bernoulli")
assert False, "expected an error"
except EnvironmentError:
assert True
# more than two character levels for response: expect error
try:
eco_model = h2o.gbm(x=eco[0:8], y=eco["Method"], distribution="bernoulli")
assert False, "expected an error"
except EnvironmentError:
assert True
#Log.info("==============================")
#Log.info("Multinomial Behavior")
#Log.info("==============================")
# more than two integers for response: expect multinomial
cars_model = h2o.gbm(x=cars[3:7], y=cars["cylinders"].asfactor(), distribution="multinomial")
assert isinstance(cars_model,h2o.model.multinomial.H2OMultinomialModel)
# more than two character levels for response: expect multinomial
eco_model = h2o.gbm(x=eco[0:8], y=eco["Method"], distribution="multinomial")
assert isinstance(eco_model,h2o.model.multinomial.H2OMultinomialModel)
if __name__ == "__main__":
tests.run_test(sys.argv, distribution_behaviorGBM)
|
brightchen/h2o-3
|
h2o-py/tests/testdir_algos/gbm/pyunit_loss_behaviorGBM.py
|
Python
|
apache-2.0
| 2,967
|
[
"Gaussian"
] |
068531ad020fe6a44bc02dde4a6129db5a89f23b672aa19f637760c8fb64349e
|
# pylint: disable=C0111
# pylint: disable=W0621
# Disable the "wildcard import" warning so we can bring in all methods from
# course helpers and ui helpers
# pylint: disable=W0401
# Disable the "Unused import %s from wildcard import" warning
# pylint: disable=W0614
# Disable the "unused argument" warning because lettuce uses "step"
# pylint: disable=W0613
# django_url is assigned late in the process of loading lettuce,
# so we import this as a module, and then read django_url from
# it to get the correct value
import lettuce.django
from lettuce import world, step
from .course_helpers import *
from .ui_helpers import *
from nose.tools import assert_equals # pylint: disable=E0611
from xmodule.modulestore.locations import SlashSeparatedCourseKey
from logging import getLogger
logger = getLogger(__name__)
@step(r'I wait (?:for )?"(\d+\.?\d*)" seconds?$')
def wait_for_seconds(step, seconds):
world.wait(seconds)
@step('I reload the page$')
def reload_the_page(step):
world.wait_for_ajax_complete()
world.browser.reload()
world.wait_for_js_to_load()
@step('I press the browser back button$')
def browser_back(step):
world.browser.driver.back()
@step('I (?:visit|access|open) the homepage$')
def i_visit_the_homepage(step):
world.visit('/')
assert world.is_css_present('header.global')
@step(u'I (?:visit|access|open) the dashboard$')
def i_visit_the_dashboard(step):
world.visit('/dashboard')
assert world.is_css_present('section.container.dashboard')
@step('I should be on the dashboard page$')
def i_should_be_on_the_dashboard(step):
assert world.is_css_present('section.container.dashboard')
assert 'Dashboard' in world.browser.title
@step(u'I (?:visit|access|open) the courses page$')
def i_am_on_the_courses_page(step):
world.visit('/courses')
assert world.is_css_present('section.courses')
@step(u'I press the "([^"]*)" button$')
def and_i_press_the_button(step, value):
button_css = 'input[value="%s"]' % value
world.css_click(button_css)
@step(u'I click the link with the text "([^"]*)"$')
def click_the_link_with_the_text_group1(step, linktext):
world.click_link(linktext)
@step('I should see that the path is "([^"]*)"$')
def i_should_see_that_the_path_is(step, path):
assert world.url_equals(path)
@step(u'the page title should be "([^"]*)"$')
def the_page_title_should_be(step, title):
assert_equals(world.browser.title, title)
@step(u'the page title should contain "([^"]*)"$')
def the_page_title_should_contain(step, title):
assert(title in world.browser.title)
@step('I log in$')
def i_log_in(step):
world.log_in(username='robot', password='test')
@step('I am a logged in user$')
def i_am_logged_in_user(step):
world.create_user('robot', 'test')
world.log_in(username='robot', password='test')
@step('I am not logged in$')
def i_am_not_logged_in(step):
world.visit('logout')
@step('I am staff for course "([^"]*)"$')
def i_am_staff_for_course_by_id(step, course_id):
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
world.register_by_course_key(course_key, True)
@step(r'click (?:the|a) link (?:called|with the text) "([^"]*)"$')
def click_the_link_called(step, text):
world.click_link(text)
@step(r'should see that the url is "([^"]*)"$')
def should_have_the_url(step, url):
assert_equals(world.browser.url, url)
@step(r'should see (?:the|a) link (?:called|with the text) "([^"]*)"$')
def should_see_a_link_called(step, text):
assert len(world.browser.find_link_by_text(text)) > 0
@step(r'should see (?:the|a) link with the id "([^"]*)" called "([^"]*)"$')
def should_have_link_with_id_and_text(step, link_id, text):
link = world.browser.find_by_id(link_id)
assert len(link) > 0
assert_equals(link.text, text)
@step(r'should see a link to "([^"]*)" with the text "([^"]*)"$')
def should_have_link_with_path_and_text(step, path, text):
link = world.browser.find_link_by_text(text)
assert len(link) > 0
assert_equals(link.first["href"], lettuce.django.django_url(path))
@step(r'should( not)? see "(.*)" (?:somewhere|anywhere) (?:in|on) (?:the|this) page')
def should_see_in_the_page(step, doesnt_appear, text):
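# The text-wait timeout below is doubled for remote SauceLabs sessions,
# which are slower than a local browser; the multiplier is then applied
# to both the "present" and "not present" branches.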
if world.LETTUCE_SELENIUM_CLIENT == 'saucelabs':
multiplier = 2
else:
multiplier = 1
if doesnt_appear:
assert world.browser.is_text_not_present(text, wait_time=5 * multiplier)
else:
assert world.browser.is_text_present(text, wait_time=5 * multiplier)
@step('I am logged in$')
def i_am_logged_in(step):
world.create_user('robot', 'test')
world.log_in(username='robot', password='test')
world.browser.visit(lettuce.django.django_url('/'))
dash_css = 'section.container.dashboard'
assert world.is_css_present(dash_css)
@step(u'I am an edX user$')
def i_am_an_edx_user(step):
world.create_user('robot', 'test')
@step(u'User "([^"]*)" is an edX user$')
def registered_edx_user(step, uname):
world.create_user(uname, 'test')
@step(u'All dialogs should be closed$')
def dialogs_are_closed(step):
assert world.dialogs_closed()
@step(u'visit the url "([^"]*)"')
def visit_url(step, url):
world.browser.visit(lettuce.django.django_url(url))
@step(u'wait for AJAX to (?:finish|complete)')
def wait_ajax(_step):
world.wait_for_ajax_complete()
@step('I will confirm all alerts')
def i_confirm_all_alerts(step):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
world.browser.execute_script('window.confirm = function(){return true;} ; window.alert = function(){return;}')
@step('I will cancel all alerts')
def i_cancel_all_alerts(step):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
world.browser.execute_script('window.confirm = function(){return false;} ; window.alert = function(){return;}')
@step('I will answer all prompts with "([^"]*)"')
def i_answer_prompts_with(step, prompt):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
world.browser.execute_script('window.prompt = function(){return "%s";}' % prompt)
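# Hedged usage sketch (hypothetical .feature lines, not taken from this
# repo): each override step must run right before the action that pops
# the dialog, e.g.
#   When I will confirm all alerts
#   And I press the "Delete" button
#   When I will answer all prompts with "robot"
#   And I press the "Rename" button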
@step('I run ipdb')
def run_ipdb(_step):
"""Run ipdb as step for easy debugging"""
import ipdb
ipdb.set_trace()
assert True
|
nanolearning/edx-platform
|
common/djangoapps/terrain/steps.py
|
Python
|
agpl-3.0
| 6,853
|
[
"VisIt"
] |
aa52693767cb897c9ba7aa0d680ff07f8281d5de054ff6596e55624807ace776
|
__author__ = 'Deniz'
from bs4 import BeautifulSoup
from splinter import Browser
import argparse, os, re, time
mmr_filepath_Dict = {}
def main():
global mmr_filepath_Dict
BASE_URL = "http://na.op.gg/"
parser = argparse.ArgumentParser(description='Attempt to search op.gg with the summoner names in every file in the '
'given directory location. Scrape html to find mmr and avg mmr.')
parser.add_argument('-in', metavar='i', type=str)
args = parser.parse_args()
inputLocation = vars(args)['in']  # 'in' shadows a keyword, so read it via vars()
get_summoner_ids_names(inputLocation, BASE_URL)
# For every file in the input dir, get the summoner names and ids
def get_summoner_ids_names(inputLocation, BASE_URL):
# Define search terms for summoner id
start_summoner_id = "': "
end_summoner_id = ", u'matchHistoryUri"
# Define search terms for summoner name
start_summoner_name = "u'summonerName': u'"
end_summoner_name = "'}"
for root, __, files in os.walk(inputLocation):
for fname in files:
fullpath = os.path.join(root, fname)
fh = open(fullpath, 'r')
read_file = fh.readlines()
fh.close()
# Split up the match history (which is all 1 line) by the summonerId string
match_history_split = str(read_file).split("u'summonerId")
# Pop off first element of array (doesn't contain any summoner ID's)
match_history_split.pop(0)
#print match_history_split[0]
# Get first element of array
match = match_history_split[0]
# Lop off everything past 135 characters
match = match[:135]
#print "MATCH: " + match
# Find the summoner id
tmp_id = re.search("%s(.*)%s" % (start_summoner_id, end_summoner_id), str(match)).group(1)
# Find the summoner name
tmp_name = re.search("%s(.*)%s" % (start_summoner_name, end_summoner_name), str(match)).group(1)
# Strip the summoner name of all whitespace
tmp_name = tmp_name.replace(' ', '')
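# Hedged worked example (hypothetical match-history fragment): given
#   "u'summonerId': 12345, u'matchHistoryUri ... u'summonerName': u'Hide On Bush'}"
# the split/regex steps above yield tmp_id == "12345" and, after the
# whitespace strip, tmp_name == "HideOnBush".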
# Pass the name, id to splinter to search on op.gg
browser = navigate_to_summoner_page(BASE_URL, tmp_name)
# Attempt to click Check MMR button
click_check_mmr(browser, fullpath, tmp_name)
# Parse the webpage to find mmr
find_mmr(browser, fullpath, tmp_name)
def navigate_to_summoner_page(BASE_URL, summonerName):
browser = Browser('chrome')
print "BROWSER visiting URL: " + str(BASE_URL+'summoner/userName='+summonerName)
browser.visit(BASE_URL+'summoner/userName='+summonerName)
return browser
def click_check_mmr(browser, fullpath, summonerName):
# Declare boolean switch
summonerFound = False
# Find the second button with the css .opButton.small (the first button is Ingame Info, second is Check MMR)
try:
button = browser.find_by_css('.opButton.small')[1]
button.click()
summonerFound = True
except Exception:
summonerFound = False
print "ERR: Summoner " + summonerName + " NOT found."
if summonerFound:
# Parse the webpage to find mmr
find_mmr(browser, fullpath, summonerName)
# Declare attempt counter
atmpt_cnt = 0
def find_mmr(browser, fullpath, summonerName):
global atmpt_cnt
# Wait 2 seconds before searching for MMR data
time.sleep(2)
# Declare boolean switch
mmr_found = False
# Find the MMR by css
try:
get_mmr = browser.find_by_css('div.InnerSummonerMMR').first.value
mmr_found = True
except Exception:
if atmpt_cnt < 5:
print "ERR: MMR CSS NOT FOUND"
print "ATTEMPT " + str(atmpt_cnt) + ": Waiting 5 seconds before trying again..."
time.sleep(5)
atmpt_cnt += 1
mmr_found = False
# Recursively try to click MMR button and parse again, up to 5 times.
click_check_mmr(browser, fullpath, summonerName)
else:
mmr_found = False
print "ERR: MMR CSS NOT FOUND, ALL ATTEMPTS EXHAUSTED"
if mmr_found:
# Define search terms
start = 'MMR for this league is '
end = 'beta'
# Define regex to search for mmr
mmr = re.findall("%s(.*)%s" % (start, end), str(get_mmr), re.S)
# Split mmr to get avg mmr and mmr
mmr = str(mmr).split(r'.\n')
try:
# Strip last 2 characters off avg mmr ']
mmr[0] = mmr[0][2:]
_avg_mmr = mmr[0]
except IndexError:
# This means avg mmr wasn't found properly
print 'ERR: AVG MMR NOT FOUND PROPERLY'
_avg_mmr = 'NONE'
try:
# Strip first 2 characters off mmr ['
mmr[1] = mmr[1][:-2]
_mmr = mmr[1]
except IndexError:
# This means mmr wasn't found properly
print 'ERR: MMR NOT FOUND PROPERLY'
_mmr = 'NONE'
print "MMR: " + _mmr
print "AVERAGE LEAGUE MMR: " + _avg_mmr
atmpt_cnt = 0 # Reset attempt counter
# Close the browser, kills chromedriver.exe as well
browser.quit()
# Write MMR info to filenames
write_mmr(_avg_mmr, _mmr, fullpath)
# Given avg mmr, mmr, and a filepath slice the last 4 characters off the filepath (.txt) append
# the avg mmr and mmr, add .txt back in and replace the old filepath with the new one.
def write_mmr(avg_mmr, mmr, fullpath):
# Rename the file
os.rename(os.curdir+'\\'+fullpath, os.curdir+'\\'+fullpath[:-4]+"_mmr="+str(mmr)+"_avg="+str(avg_mmr)+'.txt')
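# Hedged example (hypothetical values): 'data\summoner1.txt' with mmr=2150
# and avg_mmr=2100 is renamed to 'data\summoner1_mmr=2150_avg=2100.txt'.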
if __name__ == "__main__":
main()
|
Murkantilism/LoL_API_Research
|
Summoner_Data_Retrieval/Scrape_mmr_opgg.py
|
Python
|
mit
| 5,728
|
[
"VisIt"
] |
8c2ac75b6f292add326e8f98fa52b9f340e7058feab45935fd9670c81b66c288
|
global astrom
global tmpdir
import traceback, tempfile
tmpdir = '/usr/work/pkelly/'
astrom='solve-field'
def length_swarp(SUPA,FLAT_TYPE,CHIPS):
import os, re, utilities, bashreader, sys, string
from copy import copy
from glob import glob
dict = get_files(SUPA,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params['OBJNAME']}
all_chip_dict = {}
NUMScommas = reduce(lambda x,y: str(x) + ',' + str(y),CHIPS.keys())
all_chip_dict['CHIPS'] = NUMScommas
print sorted(CHIPS.keys())
NUMS = []
start = 1
crpix1s = []
crpix2s = []
for CHIP in CHIPS.keys():
NUMS.append(CHIP)
if len(CHIPS[CHIP]) == 0:
print CHIP
if len(CHIPS[CHIP]) > 0:
crpix = CHIPS[CHIP]
import re
p = re.compile('\_\d+O')
file = p.sub('_' + str(CHIP) + 'O',search_params['file'])
print file, CHIP
naxis = utilities.get_header_kw(file,['NAXIS1','NAXIS2'])
for kw in ['NAXIS1','NAXIS2']:
crpix[kw] = float(naxis[kw])
print naxis[kw]
print file
if start == 1:
crpixzero = copy(crpix)
crpixhigh = copy(crpix)
start = 0
from copy import copy
print float(crpix['CRPIX1']) < float(crpixzero['CRPIX1']), float(crpix['CRPIX2']) < float(crpixzero['CRPIX2'])
if float(crpix['CRPIX1']) + 0 >= float(crpixzero['CRPIX1']):
crpixzero['CRPIX1'] = copy(crpix['CRPIX1'])
if float(crpix['CRPIX2']) + 0 >= float(crpixzero['CRPIX2']):
crpixzero['CRPIX2'] = copy(crpix['CRPIX2'])
if float(crpix['CRPIX1']) - 0 <= float(crpixhigh['CRPIX1']):
crpixhigh['CRPIX1'] = copy(crpix['CRPIX1'])
if float(crpix['CRPIX2']) - 0 <= float(crpixhigh['CRPIX2']):
crpixhigh['CRPIX2'] = copy(crpix['CRPIX2'])
crpix1s.append(copy(crpix['CRPIX1']))
crpix2s.append(copy(crpix['CRPIX2']))
print crpix['CRPIX1'], crpix['CRPIX2'], crpixzero['CRPIX1'], crpixzero['CRPIX2'], crpixhigh['CRPIX1'], crpixhigh['CRPIX2']#, crpixhigh
print crpix.keys()
for kw in ['CRPIX1','CRPIX2','CRVAL1','CRVAL2']:
all_chip_dict[kw+ '_' + str(CHIP)] = crpix[kw]
#plot_chips(crpix1s,crpix2s)
for i in range(len(crpix1s)):
print crpix1s[i],crpix2s[i], NUMS[i]
crpix1s.sort()
crpix2s.sort()
print len(crpix1s), crpix1s, crpix2s, crpix1s[-1] - crpix1s[0] + crpix['NAXIS1'], crpix2s[-1] - crpix2s[0] + crpix['NAXIS2']
print all_chip_dict
LENGTH1 = abs(float(crpixhigh['CRPIX1']) - float(crpixzero['CRPIX1'])) + float(crpix['NAXIS1'])
LENGTH2 = abs(float(crpixhigh['CRPIX2']) - float(crpixzero['CRPIX2'])) + float(crpix['NAXIS2'])
print LENGTH1, LENGTH2, crpixzero['CRPIX1'], crpixzero['CRPIX2'], crpixhigh['CRPIX1'], crpixhigh['CRPIX2']#, crpixhigh
all_chip_dict.update({'crfixed':'third','LENGTH1':LENGTH1,'LENGTH2':LENGTH2,'CRPIX1ZERO':crpixzero['CRPIX1'],'CRPIX2ZERO':crpixzero['CRPIX2'],'CRVAL1':crpix['CRVAL1'],'CRVAL2':crpix['CRVAL2']})
save_exposure(all_chip_dict,SUPA,FLAT_TYPE)
def fix_radec(SUPA,FLAT_TYPE):
#cats = [{'im_type': 'DOMEFLAT', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.DOMEFLAT.fixwcs.rawconv'}, {'im_type': 'SKYFLAT', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.SKYFLAT.fixwcs.rawconv'}, {'im_type': 'OCIMAGE', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.OCIMAGE.fixwcs.rawconv'}]
#outfile = '' + search_params['TEMPDIR'] + 'stub'
#cats = [{'im_type': 'MAIN', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS..fixwcs.rawconv'}, {'im_type': 'D', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.D.fixwcs.rawconv'}]
import astropy, astropy.io.fits as pyfits, sys, os, re, string, copy
from config_bonn import cluster, tag, arc, filters
ppid = str(os.getppid())
#chips = length(SUPA,FLAT_TYPE)
#import time
#time.sleep(2)
dict = get_files(SUPA,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params['OBJNAME']}
from copy import copy
chips = {}
NUMS = []
for image in dict['files']:
params = copy(search_params)
ROOT = re.split('\.',re.split('\/',image)[-1])[0]
params['ROOT'] = ROOT
BASE = re.split('O',ROOT)[0]
params['BASE'] = BASE
NUM = re.split('O',re.split('\_',ROOT)[1])[0]
params['NUM'] = NUM
print NUM, BASE, ROOT, image
params['GAIN'] = 2.50 ## WARNING!!!!!!
print ROOT
finalflagim = "%(TEMPDIR)sflag_%(ROOT)s.fits" % params
res = re.split('SCIENCE',image)
res = re.split('/',res[0])
if res[-1]=='':res = res[:-1]
params['path'] = reduce(lambda x,y:x+'/'+y,res[:-1])
params['fil_directory'] = res[-1]
res = re.split('_',res[-1])
params['directory'] = res[0]
SDSS1 = "/%(path)s/%(directory)s/SCIENCE/headers_scamp_SDSS-R6/%(BASE)sO*.head" % params # it's not a ZERO!!!
SDSS2 = "/%(path)s/%(directory)s/SCIENCE/headers_scamp_SDSS-R6/%(BASE)sO*.head" % params
from glob import glob
print glob(SDSS1), glob(SDSS2)
head = None
if len(glob(SDSS1)) > 0:
head = glob(SDSS1)[0]
elif len(glob(SDSS2)) > 0:
head = glob(SDSS2)[0]
print head, SDSS2
w = {}
if head is not None:
keys = []
hf = open(head,'r').readlines()
print head
for line in hf:
import re
if string.find(line,'=') != -1:
res = re.split('=',line)
name = res[0].replace(' ','')
res = re.split('/',res[1])
value = res[0].replace(' ','')
print name, value
if string.find(name,'CD')!=-1 or string.find(name,'PV')!=-1 or string.find(name,'CR')!=-1 or string.find(name,'NAXIS') != -1:
w[name] = float(value)
print line, w[name]
keys.append(name)
from copy import copy
chips[NUM] = copy(w)
print w
NUMS.append(NUM)
length_swarp(SUPA,FLAT_TYPE,chips)
vecs = {}
for key in keys:
vecs[key] = []
vecs['good_scamp'] = []
hdu= pyfits.open(search_params['pasted_cat'])
table = hdu[2].data
CHIP = table.field('CHIP')
for i in range(len(CHIP)):
NUM = str(int(CHIP[i]))
for key in keys:
if chips[NUM].has_key(key):
vecs[key].append(float(chips[NUM][key]))
vecs['good_scamp'].append(1)
else:
vecs[key].append(-1.)
vecs['good_scamp'].append(0)
print vecs.keys()
import scipy
for key in vecs.keys():
vecs[key] = scipy.array(vecs[key])
print vecs[key][0:20], key
ra_cat = table.field('ALPHA_J2000')
dec_cat = table.field('DELTA_J2000')
x0 = (table.field('Xpos') - vecs['CRPIX1'])
y0 = (table.field('Ypos') - vecs['CRPIX2'])
x = x0*vecs['CD1_1'] + y0*vecs['CD1_2']
y = x0*vecs['CD2_1'] + y0*vecs['CD2_2']
r = (x**2. + y**2.)**0.5
xi_terms = {'PV1_0':scipy.ones(len(x)),'PV1_1':x,'PV1_2':y,'PV1_3':r,'PV1_4':x**2.,'PV1_5':x*y,'PV1_6':y**2.,'PV1_7':x**3.,'PV1_8':x**2.*y,'PV1_9':x*y**2.,'PV1_10':y**3.}
pv1_keys = filter(lambda x: string.find(x,'PV1') != -1, vecs.keys())
print 'pv1_keys', pv1_keys
xi = reduce(lambda x,y: x + y, [xi_terms[k]*vecs[k] for k in pv1_keys])
eta_terms = {'PV2_0':scipy.ones(len(x)),'PV2_1':y,'PV2_2':x,'PV2_3':r,'PV2_4':y**2.,'PV2_5':y*x,'PV2_6':x**2.,'PV2_7':y**3.,'PV2_8':y**2.*x,'PV2_9':y*x**2.,'PV2_10':x**3.}
pv2_keys = filter(lambda x: string.find(x,'PV2') != -1, vecs.keys())
print 'pv2_keys', pv2_keys
eta = reduce(lambda x,y: x + y, [eta_terms[k]*vecs[k] for k in pv2_keys])
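# The two reduce() sums above assume scamp-style TPV distortion headers:
#   xi  = sum_k PV1_k * term_k(x, y, r)
#   eta = sum_k PV2_k * term_k(y, x, r)
# with polynomial terms up to 3rd order and PV*_3 multiplying the radial
# term r = sqrt(x^2 + y^2), exactly as laid out in the two dicts above.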
print xi[0:10],eta[0:10], len(eta)
print vecs.keys(), vecs['CD1_1'][0],vecs['CD1_2'][0],vecs['CD2_2'][0],vecs['CD2_1'][0]
import math
ra_out = []
dec_out = []
cat = open('cat','w')
for i in range(len(xi)):
XI = xi[i] / 180.0 * math.pi
ETA = eta[i] / 180.0 * math.pi
CRVAL1 = vecs['CRVAL1'][i]/180.0* math.pi
CRVAL2 = vecs['CRVAL2'][i]/180.0 * math.pi
p = math.sqrt(XI**2. + ETA**2.)
c = math.atan(p)
a = CRVAL1 + math.atan((XI*math.sin(c))/(p*math.cos(CRVAL2)*math.cos(c) - ETA*math.sin(CRVAL2)*math.sin(c)))
d = math.asin(math.cos(c)*math.sin(CRVAL2) + ETA*math.sin(c)*math.cos(CRVAL2)/p)
ra = a*180.0/math.pi
dec = d*180.0/math.pi
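# The loop body above is the standard gnomonic (TAN) deprojection of the
# standard coordinates (xi, eta) about the tangent point (CRVAL1, CRVAL2):
#   p = sqrt(xi^2 + eta^2),  c = atan(p)
#   ra  = CRVAL1 + atan( xi*sin(c) / (p*cos(CRVAL2)*cos(c) - eta*sin(CRVAL2)*sin(c)) )
#   dec = asin( cos(c)*sin(CRVAL2) + eta*sin(c)*cos(CRVAL2)/p )
# with all angles in radians, converted back to degrees just above.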
if i % 100== 0:
print 'ra_cat','dec_cat',ra,ra_cat[i], dec, dec_cat[i]
print (ra-ra_cat[i])*3600.,(dec-dec_cat[i])*3600.
''' if no solution, give a -999 value '''
if vecs['good_scamp'][i] != 1:
import random
ra = -999 - 200*random.random()
dec = -999 - 200*random.random()
ra_out.append(ra)
dec_out.append(dec)
cat.write(str(ra) + ' ' + str(dec) + '\n')
#cat.write(str(ra[i]) + ' ' + str(dec[i]) + '\n')
cat.close()
os.system(' mkreg.pl -xcol 0 -ycol 1 -c -rad 3 -wcs cat')
hdu[2].data.field('ALPHA_J2000')[:] = scipy.array(ra_out)
hdu[2].data.field('DELTA_J2000')[:] = scipy.array(dec_out)
table = hdu[2].data
print 'BREAK'
print ra_out[0:10], table.field('ALPHA_J2000')[0:10]
print 'BREAK'
print dec_out[0:10], table.field('DELTA_J2000')[0:10]
print SUPA, search_params['pasted_cat']
os.system('rm ' + search_params['pasted_cat'])
hdu.writeto(search_params['pasted_cat'])
save_exposure({'fixradec':1},SUPA,FLAT_TYPE)
def mk_tab(list):
import astropy, astropy.io.fits as pyfits
from astropy.io.fits import Column
cols = []
for ele in list:
array = ele[0]
name = ele[1]
cols.append(Column(name=name,format='1E',array=array))
coldefs = pyfits.ColDefs(cols)
hdu = pyfits.BinTableHDU.from_columns(coldefs)
return hdu
def merge(t1,t2):
import astropy, astropy.io.fits as pyfits
t = t1.columns + t2[1].columns
hdu = pyfits.BinTableHDU.from_columns(t)
return hdu
def cutout(infile,mag,color='red'):
import os, utilities
ppid = str(os.getppid())
print ppid + 'a'
#pylab.show()
outfile = raw_input('name of output file?')
color = raw_input('color of regions?')
limits = ['lower_mag','upper_mag','lower_diff','upper_diff']
lim_dict = {}
for lim in limits:
print lim + '?'
b = raw_input()
lim_dict[lim] = b
utilities.run('ldacfilter -i ' + infile + ' -t PSSC\
-c "(((SEx_' + mag + '>' + str(lim_dict['lower_mag']) + ') AND (SEx_' + mag + '<' + str(lim_dict['upper_mag']) + ')) AND (magdiff>' + str(lim_dict['lower_diff']) + ')) AND (magdiff<' + str(lim_dict['upper_diff']) + ');"\
-o cutout1.' + ppid,['cutout1.' + ppid])
utilities.run('ldactoasc -b -q -i cutout1.' + ppid + ' -t PSSC\
-k Ra Dec > /tmp/' + outfile,[outfile])
utilities.run('mkreg.pl -c -rad 8 -xcol 0 -ycol 1 -wcs -colour ' + color + ' /tmp/' + outfile)
def get_median(cat,key):
import astropy, astropy.io.fits as pyfits, sys, os, re, string, copy
p = pyfits.open(cat)
magdiff = p[1].data.field(key)
magdiff.sort()
return magdiff[int(len(magdiff)/2)]
def coordinate_limits(cat):
import astropy, astropy.io.fits as pyfits, sys, os, re, string, copy
p = pyfits.open(cat)
good_entries = p[2].data
mask = abs(good_entries.field('ALPHA_J2000')) > 0.0001
good_entries = good_entries[mask]
mask = abs(good_entries.field('ALPHA_J2000')) < 400
good_entries = good_entries[mask]
mask = abs(good_entries.field('DELTA_J2000')) > 0.0001
good_entries = good_entries[mask]
mask = abs(good_entries.field('DELTA_J2000')) < 300
good_entries = good_entries[mask]
ra = good_entries.field('ALPHA_J2000')
ra.sort()
dec = good_entries.field('DELTA_J2000')
dec.sort()
print cat, 'cat'
return ra[0],ra[-1],dec[0],dec[-1]
def combine_cats(cats,outfile,search_params):
#cats = [{'im_type': 'DOMEFLAT', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.DOMEFLAT.fixwcs.rawconv'}, {'im_type': 'SKYFLAT', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.SKYFLAT.fixwcs.rawconv'}, {'im_type': 'OCIMAGE', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.OCIMAGE.fixwcs.rawconv'}]
#outfile = '' + search_params['TEMPDIR'] + 'stub'
#cats = [{'im_type': 'MAIN', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS..fixwcs.rawconv'}, {'im_type': 'D', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.D.fixwcs.rawconv'}]
import astropy, astropy.io.fits as pyfits, sys, os, re, string, copy
from config_bonn import cluster, tag, arc, filters
ppid = str(os.getppid())
tables = {}
colset = 0
cols = []
for catalog in cats:
file = catalog['cat']
os.system('mkdir ' + search_params['TEMPDIR'] )
aper = tempfile.NamedTemporaryFile(dir=search_params['TEMPDIR']).name
os.system('ldactoasc -i ' + catalog['cat'] + ' -b -s -k MAG_APER MAGERR_APER -t OBJECTS > ' + aper)
cat1 = tempfile.NamedTemporaryFile(dir=search_params['TEMPDIR']).name
os.system('asctoldac -i ' + aper + ' -o ' + cat1 + ' -t OBJECTS -c ' + os.environ['bonn'] + '/photconf/MAG_APER.conf')
allconv = tempfile.NamedTemporaryFile(dir=search_params['TEMPDIR']).name
os.system('ldacjoinkey -i ' + catalog['cat'] + ' -p ' + cat1 + ' -o ' + allconv + ' -k MAG_APER1 MAG_APER2 MAGERR_APER1 MAGERR_APER2')
tables[catalog['im_type']] = pyfits.open(allconv)
#if filter == filters[0]:
# tables['notag'] = pyfits.open('' + search_params['TEMPDIR'] + 'all.conv' )
for catalog in cats:
for i in range(len(tables[catalog['im_type']][1].columns)):
print catalog['im_type'], catalog['cat']
if catalog['im_type'] != '':
tables[catalog['im_type']][1].columns[i].name = tables[catalog['im_type']][1].columns[i].name + catalog['im_type']
else:
tables[catalog['im_type']][1].columns[i].name = tables[catalog['im_type']][1].columns[i].name
cols.append(tables[catalog['im_type']][1].columns[i])
print cols
print len(cols)
hdu = pyfits.PrimaryHDU()
hduIMHEAD = pyfits.BinTableHDU.from_columns(tables[catalog['im_type']][2].columns)
hduOBJECTS = pyfits.BinTableHDU.from_columns(cols)
hdulist = pyfits.HDUList([hdu])
hdulist.append(hduIMHEAD)
hdulist.append(hduOBJECTS)
hdulist[1].header['EXTNAME']='FIELDS'
hdulist[2].header['EXTNAME']='OBJECTS'
print file
os.system('rm ' + outfile)
import re
res = re.split('/',outfile)
os.system('mkdir -p ' + reduce(lambda x,y: x + '/' + y,res[:-1]))
hdulist.writeto(outfile)
print outfile , '$#######$'
print 'done'
def paste_cats(cats,outfile): #cats,outfile,search_params):
#outfile = '/tmp/test.cat'
#cats = ['/tmp/15464/SUPA0028506_1OCFS.newpos', '/tmp/15464/SUPA0028506_9OCFS.newpos']
#print outfile, cats
import astropy, astropy.io.fits as pyfits, sys, os, re, string, copy
from config_bonn import cluster, tag, arc, filters
ppid = str(os.getppid())
tables = {}
colset = 0
cols = []
table = pyfits.open(cats[0])
data = []
nrows = 0
good_cats = []
''' get rid of empty tables '''
for catalog in cats:
cattab = pyfits.open(catalog)
if cattab[2].data is not None:
good_cats.append(catalog)
cats = good_cats
for catalog in cats:
cattab = pyfits.open(catalog)
nrows += cattab[2].data.shape[0]
hduOBJECTS = pyfits.BinTableHDU.from_columns(table[2].columns, nrows=nrows)
rowstart = 0
rowend = 0
for catalog in cats:
cattab = pyfits.open(catalog)
rowend += cattab[2].data.shape[0]
for i in range(len(cattab[2].columns)):
hduOBJECTS.data.field(i)[rowstart:rowend]=cattab[2].data.field(i)
rowstart = rowend
# update SeqNr
print rowend,len( hduOBJECTS.data.field('SeqNr')), len(range(1,rowend+1))
hduOBJECTS.data.field('SeqNr')[0:rowend]=range(1,rowend+1)
#hdu[0].header['EXTNAME']='FIELDS'
hduIMHEAD = pyfits.BinTableHDU.from_columns(table[1].columns)
print cols
print len(cols)
hdu = pyfits.PrimaryHDU()
hdulist = pyfits.HDUList([hdu])
hdulist.append(hduIMHEAD)
hdulist.append(hduOBJECTS)
hdulist[1].header['EXTNAME']='FIELDS'
hdulist[2].header['EXTNAME']='OBJECTS'
print outfile
os.system('rm ' + outfile)
hdulist.writeto(outfile)
print outfile , '$#######$'
print 'done'
def imstats(SUPA,FLAT_TYPE):
import os, re, utilities, bashreader, sys, string
from copy import copy
from glob import glob
dict = get_files(SUPA,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params['OBJNAME']}
print dict['files']
import commands
tmp_dicts = []
for file in dict['files']:
op = commands.getoutput('imstats ' + file)
print op
res = re.split('\n',op)
for line in res:
if string.find(line,'filename') != -1:
line = line.replace('$ imstats: ','')
res2 = re.split('\t',line)
res3 = re.split('\s+',res[-1])
tmp_dict = {}
for i in range(len(res3)):
tmp_dict[res2[i]] = res3[i]
tmp_dicts.append(tmp_dict)
print tmp_dicts
median_average = 0
sigma_average = 0
for d in tmp_dicts:
print d.keys()
sigma_average += float(d['sigma'])
median_average += float(d['median'])
dict['sigma_average'] = sigma_average / len(tmp_dicts)
dict['median_average'] = median_average / len(tmp_dicts)
print dict['sigma_average'], dict['median_average']
save_exposure(dict,SUPA,FLAT_TYPE)
def save_fit(fits,im_type,type,SUPA,FLAT_TYPE):
import MySQLdb, sys, os, re, time
from copy import copy
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
for fit in fits:
#which_solution += 1
user_name = os.environ['USER']
time_now = time.asctime()
user = user_name #+ str(time.time())
dict = {}
#copy array but exclude lists
for ele in fit['class'].fitvars.keys():
if ele != 'condition' and ele != 'model_name' and ele != 'fixed_name':
dict[ele + '_' + type + '_' + im_type] = fit['class'].fitvars[ele]
save_exposure(dict,SUPA,FLAT_TYPE)
db2.close()
def select_analyze():
import MySQLdb, sys, os, re, time, string
from copy import copy
db2,c = connect_except()
command = "DESCRIBE illumination_db"
print command
c.execute(command)
results = c.fetchall()
keys = []
for line in results:
keys.append(line[0])
command = "SELECT * from illumination_db where zp_err_galaxy_D is null and PPRUN='2002-06-04_W-J-V'" # where OBJNAME='HDFN' and filter='W-J-V' and ROTATION=0"
command = "SELECT * from illumination_db where color1_star > 0.2 and OBJNAME!='HDFN' limit 2" # where matched_cat_star is null" # where OBJNAME='HDFN' and filter='W-J-V' and ROTATION=0"
first = True
while len(results) > 100 or first:
first = False
#command= "SELECT * from illumination_db where (OBJNAME like 'A%' or OBJNAME like 'MACS%') and (pasted_cat is null or pasted_cat like '%None%') ORDER BY RAND()" # and PPRUN='2003-04-04_W-C-IC'"
command= "SELECT * from illumination_db where (OBJNAME like 'A%' or OBJNAME like 'MACS%') and (pasted_cat is null or pasted_cat like '%None%' or CRVAL1ASTROMETY_2 is null) ORDER BY RAND()" # and PPRUN='2003-04-04_W-C-IC'"
command= "SELECT * from illumination_db where fixradec is null and OBJNAME like '%0850%' and OBJECT!='VMF' and SUPA!='SUPA0050874' and BADCCD!=1.0" #(pasted_cat is null or pasted_cat like '%None%' or CRVAL1ASTROMETRY_2 is null) ORDER BY RAND()" # and PPRUN='2003-04-04_W-C-IC'"
command="SELECT * from illumination_db where resam=1"
command = "SELECT * from illumination_db where OBJNAME like 'TEST' and filter='W-C-IC'" # and pasted_cat like '%1423%' " # limit 1"# and filter='W-J-B' and PPRUN='2006-12-21_W-J-B'"
#command= "SELECT * from illumination_db where SUPA='SUPA0021292'" # where (OBJNAME like 'A%' or OBJNAME like 'MACS%') and SUPA='SUPA0020098'" # and PPRUN='2003-04-04_W-C-IC'"
#command = "select * from illumination_db where SUPA='SUPA0028506'"
#command = "select * from illumination_db where (OBJECT like '%0018short%') and (FILTER='W-J-B' or FILTER='W-S-Z+')" # or OBJECT like '%0018short%')" # and pasted_cat is null" # and color1_star_ is null"
print command
c.execute(command)
results = c.fetchall()
print len(results)
#print results
dicts = []
for j in range(len(results)):
dict = {}
for i in range(len(results[j])):
dict[keys[i]] = results[j][i]
#print dict['SUPA'], dict['file'], dict['OBJNAME'], dict['pasted_cat'], dict['matched_cat_star']
#good = raw_input()
d_update = get_files(dict['SUPA'],dict['FLAT_TYPE'])
go = 0
if d_update.has_key('TRIED'):
if d_update['TRIED'] != 'YES':
go = 1
else: go = 1
if string.find(str(dict['TIME']),'N') == -1:
#print dict['TIME']
if time.time() - float(dict['TIME']) > 600:
go = 1
else: go = 0
else: go = 1
if 1: # go:
#print str(time.time())
save_exposure({'ACTIVE':'YES','TIME':str(time.time())},dict['SUPA'],dict['FLAT_TYPE'])
os.system('rm -R ' + tmpdir)
d = get_files(dict['SUPA'])
save_exposure({'weights_there':len(d['weight_files'])},dict['SUPA'],dict['FLAT_TYPE'])
analyze(dict['SUPA'],dict['FLAT_TYPE'],dict)
save_exposure({'ACTIVE':'FINISHED'},dict['SUPA'],dict['FLAT_TYPE'])
def analyze(SUPA,FLAT_TYPE,params={}):
#try:
import sys, os, string
#os.system('rm -rf ' + search_params['TEMPDIR'] + '*')
trial = True
ppid = str(os.getppid())
try:
#update_dict(SUPA, FLAT_TYPE)
#fix_chips(SUPA, FLAT_TYPE)
#imstats(SUPA,FLAT_TYPE)
if 1: #string.find(str(params['CRPIX1ZERO']),'None') != -1:
length_DEPRECATED(SUPA,FLAT_TYPE)
if 0: #string.find(str(params['fwhm']),'None') != -1:
find_seeing(SUPA,FLAT_TYPE)
sextract(SUPA,FLAT_TYPE)
#fix_radec(SUPA, FLAT_TYPE)
print 'finished'
raw_input()
#match_simple(SUPA,FLAT_TYPE)
#phot(SUPA,FLAT_TYPE)
#get_sdss_obj(SUPA,FLAT_TYPE)
#apply_photometric_calibration(SUPA,FLAT_TYPE)
print 'finished'
except KeyboardInterrupt:
raise
except:
ppid_loc = str(os.getppid())
print traceback.print_exc(file=sys.stdout)
if ppid_loc != ppid: sys.exit(0)
if trial:
raise Exception
#except KeyboardInterrupt:
# raise
#except:
# ppid_loc = str(os.getppid())
# print sys.exc_info()
# print 'something else failed',ppid, ppid_loc
# if ppid_loc != ppid: sys.exit(0)
# # os.system('rm -rf /tmp/' + ppid)
##
# os.system('rm -rf /tmp/' + ppid)
#
def get_files(SUPA,FLAT_TYPE=None):
import MySQLdb, sys, os, re
db2,c = connect_except()
command = "DESCRIBE illumination_db"
#print command
c.execute(command)
results = c.fetchall()
keys = []
for line in results:
keys.append(line[0])
command = "SELECT * from illumination_db where SUPA='" + SUPA + "'" # AND FLAT_TYPE='" + FLAT_TYPE + "'"
#print command
c.execute(command)
results = c.fetchall()
dict = {}
for i in range(len(results[0])):
dict[keys[i]] = results[0][i]
#print dict
file_pat = dict['file']
import re, glob
res = re.split('_\d+O',file_pat)
pattern = res[0] + '_*O' + res[1]
files = glob.glob(pattern)
dict['files'] = files
#print dict['files']
file_pat = dict['file']
import re, glob
res = re.split('_\d+O',file_pat)
pattern = res[0].replace('SCIENCE','WEIGHTS') + '_*O' + res[1].replace('.fits','.weight.fits')
print pattern
files = glob.glob(pattern)
dict['weight_files'] = files
print dict['weight_files']
db2.close()
return dict
def get_fits(CLUSTER,FILTER,PPRUN):
import MySQLdb, sys, os, re
db2,c = connect_except()
command="SELECT * from fit_db where FILTER='" + FILTER + "' and CLUSTER='" + CLUSTER + "' and PPRUN='" + PPRUN + "'"
print command
c.execute(command)
results=c.fetchall()
db_keys = describe_db(c,'fit_db')
dtop = {}
for line in results:
for i in range(len(db_keys)):
dtop[db_keys[i]] = str(line[i])
db2.close()
return dtop
def connect_except():
import MySQLdb, sys, os, re, random, time
notConnect = True
tried = 0
while notConnect:
tried += 1
try:
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
notConnect = False
except:
print traceback.print_exc(file=sys.stdout)
randwait = int(random.random()*30)
print 'rand wait', randwait
time.sleep(randwait)
if tried > 15: sys.exit(0)
print 'done'
return db2,c
def save_exposure(dict,SUPA=None,FLAT_TYPE=None):
if SUPA != None and FLAT_TYPE != None:
dict['SUPA'] = SUPA
dict['FLAT_TYPE'] = FLAT_TYPE
db2,c = connect_except()
#command = "CREATE TABLE IF NOT EXISTS illumination_db ( id MEDIUMINT NOT NULL AUTO_INCREMENT, PRIMARY KEY (id))"
#print command
#c.execute("DROP TABLE IF EXISTS illumination_db")
#c.execute(command)
from copy import copy
floatvars = {}
stringvars = {}
#copy array but exclude lists
import string
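# Type heuristic: a value containing any lowercase letter, any uppercase
# letter except 'E' (kept so exponents like 1E5 still count as floats),
# or '_', '-', ',' is stored in a varchar column; everything else goes
# into a float column.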
letters = string.ascii_lowercase + string.ascii_uppercase.replace('E','') + '_' + '-' + ','
for ele in dict.keys():
type = 'float'
for l in letters:
if string.find(str(dict[ele]),l) != -1:
type = 'string'
if type == 'float':
print ele, dict[ele]
floatvars[ele] = str(float(dict[ele]))
elif type == 'string':
stringvars[ele] = dict[ele]
# make database if it doesn't exist
print 'floatvars', floatvars
print 'stringvars', stringvars
for column in stringvars:
try:
command = 'ALTER TABLE illumination_db ADD ' + column + ' varchar(240)'
c.execute(command)
except: nope = 1
for column in floatvars:
try:
command = 'ALTER TABLE illumination_db ADD ' + column + ' float(30)'
c.execute(command)
except: nope = 1
# insert new observation
SUPA = dict['SUPA']
flat = dict['FLAT_TYPE']
c.execute("SELECT SUPA from illumination_db where SUPA = '" + SUPA + "' and flat_type = '" + flat + "'")
results = c.fetchall()
print results
if len(results) > 0:
print 'already added'
else:
command = "INSERT INTO illumination_db (SUPA,FLAT_TYPE) VALUES ('" + dict['SUPA'] + "','" + dict['FLAT_TYPE'] + "')"
print command
c.execute(command)
import commands
vals = ''
for key in stringvars.keys():
print key, stringvars[key]
vals += ' ' + key + "='" + str(stringvars[key]) + "',"
for key in floatvars.keys():
print key, floatvars[key]
vals += ' ' + key + "='" + floatvars[key] + "',"
vals = vals[:-1]
command = "UPDATE illumination_db set " + vals + " WHERE SUPA='" + dict['SUPA'] + "' AND FLAT_TYPE='" + dict['FLAT_TYPE'] + "'"
print command
c.execute(command)
print vals
#names = reduce(lambda x,y: x + ',' + y, [x for x in floatvars.keys()])
#values = reduce(lambda x,y: str(x) + ',' + str(y), [floatvars[x] for x in floatvars.keys()])
#names += ',' + reduce(lambda x,y: x + ',' + y, [x for x in stringvars.keys()])
#values += ',' + reduce(lambda x,y: x + ',' + y, ["'" + str(stringvars[x]) + "'" for x in stringvars.keys()])
#command = "INSERT INTO illumination_db (" + names + ") VALUES (" + values + ")"
#print command
#os.system(command)
db2.close()
def initialize(filter,OBJNAME):
import os, re, bashreader, sys, string, utilities
from glob import glob
from copy import copy
dict = bashreader.parseFile(os.environ['bonn'] + 'progs.ini')
for key in dict.keys():
os.environ[key] = str(dict[key])
import os
ppid = str(os.getppid())
PHOTCONF = os.environ['bonn'] + '/photconf/'
#TEMPDIR = '/usr/work/pkelly/' + ppid + '/'
TEMPDIR = tmpdir
os.system('mkdir ' + TEMPDIR)
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':OBJNAME}
search_params = {'path':path, 'OBJNAME':OBJNAME, 'filter':filter, 'PHOTCONF':PHOTCONF, 'DATACONF':os.environ['DATACONF'], 'TEMPDIR':TEMPDIR}
return search_params
def update_dict(SUPA,FLAT_TYPE):
import utilities
dict = get_files(SUPA,FLAT_TYPE)
print dict['file']
kws = utilities.get_header_kw(dict['file'],['ROTATION','OBJECT','GABODSID','CONFIG','EXPTIME','AIRMASS','INSTRUM','PPRUN','BADCCD']) # return KEY/NA if not SUBARU
save_exposure(kws,SUPA,FLAT_TYPE)
def gather_exposures(OBJNAME,filters=None):
if not filters:
filters = ['B','W-J-B','W-J-V','W-C-RC','W-C-IC','I','W-S-Z+']
for filter in filters:
search_params = initialize(filter,OBJNAME)
import os, re, bashreader, sys, string, utilities
from glob import glob
from copy import copy
searchstr = "/%(path)s/%(filter)s*CALIB/SCIENCE/*fits" % search_params
print searchstr
files = glob(searchstr)
files.sort()
#print files
exposures = {}
# first 30 files
#print files[0:30]
import MySQLdb, sys, os, re
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
for file in files:
if string.find(file,'wcs') == -1 and string.find(file,'.sub.fits') == -1 and string.find(file,'I.fits') == -1:
res = re.split('_',re.split('/',file)[-1])
exp_name = res[0]
if not exposures.has_key(exp_name): exposures[exp_name] = {'images':[],'keywords':{}}
exposures[exp_name]['images'].append(file) # exp_name is the root of the image name
if len(exposures[exp_name]['keywords'].keys()) == 0: #not exposures[exp_name]['keywords'].has_key('ROTATION'): #if exposure does not have keywords yet, then get them
exposures[exp_name]['keywords']['filter'] = filter
exposures[exp_name]['keywords']['file'] = file
res2 = re.split('/',file)
for r in res2:
if string.find(r,filter) != -1:
print r
exposures[exp_name]['keywords']['date'] = r.replace(filter + '_','')
exposures[exp_name]['keywords']['fil_directory'] = r
search_params['fil_directory'] = r
kws = utilities.get_header_kw(file,['CRVAL1','CRVAL2','ROTATION','OBJECT','GABODSID','CONFIG','EXPTIME','AIRMASS','INSTRUM','PPRUN','BADCCD']) # return KEY/NA if not SUBARU
''' figure out a way to break into SKYFLAT, DOMEFLAT '''
ppid = str(os.getppid())
command = 'dfits ' + file + ' > ' + search_params['TEMPDIR'] + '/header'
utilities.run(command)
file = open('' + search_params['TEMPDIR'] + 'header','r').read()
import string
if string.find(file,'SKYFLAT') != -1: exposures[exp_name]['keywords']['FLAT_TYPE'] = 'SKYFLAT'
elif string.find(file,'DOMEFLAT') != -1: exposures[exp_name]['keywords']['FLAT_TYPE'] = 'DOMEFLAT'
#print file, exposures[exp_name]['keywords']['FLAT_TYPE']
file = open('' + search_params['TEMPDIR'] + 'header','r').readlines()
import string
for line in file:
print line
if string.find(line,'Flat frame:') != -1 and string.find(line,'illum') != -1:
import re
res = re.split('SET',line)
if len(res) > 1:
res = re.split('_',res[1])
set = res[0]
exposures[exp_name]['keywords']['FLAT_SET'] = set
res = re.split('illum',line)
res = re.split('\.',res[1])
smooth = res[0]
exposures[exp_name]['keywords']['SMOOTH'] = smooth
break
for kw in kws.keys():
exposures[exp_name]['keywords'][kw] = kws[kw]
exposures[exp_name]['keywords']['SUPA'] = exp_name
exposures[exp_name]['keywords']['OBJNAME'] = OBJNAME
print exposures[exp_name]['keywords']
save_exposure(exposures[exp_name]['keywords'])
return exposures
def find_seeing(SUPA,FLAT_TYPE):
import os, re, utilities, sys
from copy import copy
dict = get_files(SUPA,FLAT_TYPE)
print dict['file']
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
print dict['files']
#params PIXSCALE GAIN
''' quick run through for seeing '''
children = []
for image in search_params['files']:
child = os.fork()
if child:
children.append(child)
else:
params = copy(search_params)
ROOT = re.split('\.',re.split('\/',image)[-1])[0]
params['ROOT'] = ROOT
NUM = re.split('O',re.split('\_',ROOT)[1])[0]
params['NUM'] = NUM
print ROOT
weightim = "/%(path)s/%(fil_directory)s/WEIGHTS/%(ROOT)s.weight.fits" % params
#flagim = "/%(path)s/%(fil_directory)s/WEIGHTS/globalflag_%(NUM)s.fits" % params
#finalflagim = TEMPDIR + "flag_%(ROOT)s.fits" % params
os.system('mkdir -p ' + params['TEMPDIR'])
params['finalflagim'] = weightim
#os.system('rm ' + finalflagim)
#command = "ic -p 16 '1 %2 %1 0 == ?' " + weightim + " " + flagim + " > " + finalflagim
#utilities.run(command)
command = "sex %(file)s -c %(PHOTCONF)s/singleastrom.conf.sex \
-FLAG_IMAGE ''\
-FLAG_TYPE MAX\
-CATALOG_NAME %(TEMPDIR)s/seeing_%(ROOT)s.cat \
-FILTER_NAME %(PHOTCONF)s/default.conv\
-CATALOG_TYPE 'ASCII' \
-DETECT_MINAREA 8 -DETECT_THRESH 8.\
-ANALYSIS_THRESH 8 \
-WEIGHT_IMAGE /%(path)s/%(fil_directory)s/WEIGHTS/%(ROOT)s.weight.fits\
-WEIGHT_TYPE MAP_WEIGHT\
-PARAMETERS_NAME %(PHOTCONF)s/singleastrom.ascii.flag.sex" % params
print command
raw_input()
os.system(command)
sys.exit(0)
for child in children:
os.waitpid(child,0)
command = 'cat ' + search_params['TEMPDIR'] + 'seeing_' + SUPA + '*cat > ' + search_params['TEMPDIR'] + 'paste_seeing_' + SUPA + '.cat'
utilities.run(command)
file_seeing = search_params['TEMPDIR'] + '/paste_seeing_' + SUPA + '.cat'
PIXSCALE = float(search_params['PIXSCALE'])
reload(utilities)
fwhm = utilities.calc_seeing(file_seeing,10,PIXSCALE)
save_exposure({'fwhm':fwhm},SUPA,FLAT_TYPE)
print file_seeing, SUPA, PIXSCALE
def length_DEPRECATED(SUPA,FLAT_TYPE):
import os, re, utilities, bashreader, sys, string
from copy import copy
from glob import glob
dict = get_files(SUPA,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params['OBJNAME']}
res = re.split('SCIENCE',search_params['files'][0])
res = re.split('/',res[0])
if res[-1]=='':res = res[:-1]
search_params['path'] = reduce(lambda x,y:x+'/'+y,res[:-1])
res = re.split('\_',res[-1])
search_params['fil_directory'] = res[0]
print res, res[0]
raw_input()
print search_params['path'], search_params['fil_directory'], 'list'
save_exposure({'path':search_params['path'],'fil_directory':search_params['fil_directory']},SUPA,FLAT_TYPE)
return
''' get the CRPIX values '''
start = 1
#CRPIXZERO is at the chip at the bottom left and so has the greatest value!!!!
x = []
y = []
chips = {}
NUMS = []
all_chip_dict = {}
for image in search_params['files']:
print image
res = re.split('\_\d+',re.split('\/',image)[-1])
#print res
imroot = "/%(path)s/%(fil_directory)s/SCIENCE/" % search_params
im = imroot + res[0] + '_1' + res[1]
#print im
crpix = utilities.get_header_kw(image,['CRPIX1','CRPIX2','NAXIS1','NAXIS2','CRVAL1','CRVAL2','IMAGEID'])
if start == 1:
crpixzero = copy(crpix)
crpixhigh = copy(crpix)
start = 0
from copy import copy
print float(crpix['CRPIX1']) < float(crpixzero['CRPIX1']), float(crpix['CRPIX2']) < float(crpixzero['CRPIX2'])
if float(crpix['CRPIX1']) + 0 >= float(crpixzero['CRPIX1']):
crpixzero['CRPIX1'] = copy(crpix['CRPIX1'])
if float(crpix['CRPIX2']) + 0 >= float(crpixzero['CRPIX2']):
crpixzero['CRPIX2'] = copy(crpix['CRPIX2'])
if float(crpix['CRPIX1']) - 0 <= float(crpixhigh['CRPIX1']):
crpixhigh['CRPIX1'] = copy(crpix['CRPIX1'])
if float(crpix['CRPIX2']) - 0 <= float(crpixhigh['CRPIX2']):
crpixhigh['CRPIX2'] = copy(crpix['CRPIX2'])
print crpix['CRPIX1'], crpix['CRPIX2'], crpixzero['CRPIX1'], crpixzero['CRPIX2'], crpixhigh['CRPIX1'], crpixhigh['CRPIX2']#, crpixhigh
x.append(float(crpix['CRPIX1']))
y.append(float(crpix['CRPIX2']))
chips[crpix['IMAGEID']] = crpix
NUMS.append(crpix['IMAGEID'])
for kw in ['CRPIX1','CRPIX2','NAXIS1','NAXIS2','CRVAL1','CRVAL2']:
all_chip_dict[kw+ '_' + str(crpix['IMAGEID'])] = crpix[kw]
NUMScommas = reduce(lambda x,y: str(x) + ',' + str(y),NUMS)
all_chip_dict['CHIPS'] = NUMScommas
print all_chip_dict
LENGTH1 = abs(float(crpixhigh['CRPIX1']) - float(crpixzero['CRPIX1'])) + float(crpix['NAXIS1'])
LENGTH2 = abs(float(crpixhigh['CRPIX2']) - float(crpixzero['CRPIX2'])) + float(crpix['NAXIS2'])
chips['CRPIX1ZERO'] = crpixzero['CRPIX1']
chips['CRPIX2ZERO'] = crpixzero['CRPIX2']
chips['NAXIS1'] = crpixzero['NAXIS1']
chips['NAXIS2'] = crpixzero['NAXIS2']
print LENGTH1, LENGTH2, crpixzero['CRPIX1'], crpixzero['CRPIX2'], crpixhigh['CRPIX1'], crpixhigh['CRPIX2']#, crpixhigh
all_chip_dict.update({'crfixed':'third','LENGTH1':LENGTH1,'LENGTH2':LENGTH2,'CRPIX1ZERO':crpixzero['CRPIX1'],'CRPIX2ZERO':crpixzero['CRPIX2'],'CRVAL1':crpix['CRVAL1'],'CRVAL2':crpix['CRVAL2']})
save_exposure(all_chip_dict,SUPA,FLAT_TYPE)
return chips
#return x,y
def fix_chips(SUPA,FLAT_TYPE):
#cats = [{'im_type': 'DOMEFLAT', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.DOMEFLAT.fixwcs.rawconv'}, {'im_type': 'SKYFLAT', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.SKYFLAT.fixwcs.rawconv'}, {'im_type': 'OCIMAGE', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.OCIMAGE.fixwcs.rawconv'}]
#outfile = '' + search_params['TEMPDIR'] + 'stub'
#cats = [{'im_type': 'MAIN', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS..fixwcs.rawconv'}, {'im_type': 'D', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.D.fixwcs.rawconv'}]
import astropy, astropy.io.fits as pyfits, sys, os, re, string, copy
from config_bonn import cluster, tag, arc, filters
ppid = str(os.getppid())
chips = length_DEPRECATED(SUPA,FLAT_TYPE)
#import time
#time.sleep(2)
dict = get_files(SUPA,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params['OBJNAME']}
print chips
#print search_params['pasted_cat']
hdu= pyfits.open(search_params['pasted_cat'])
table = hdu[2].data
Xpos = table.field('Xpos')
Ypos = table.field('Ypos')
CHIP = table.field('CHIP')
import scipy
CRPIX1ZERO = scipy.ones(len(Xpos)) * (float(chips['CRPIX1ZERO']))
CRPIX2ZERO = scipy.ones(len(Ypos)) * (float(chips['CRPIX2ZERO']))
crpix1s = []
crpix2s = []
for i in range(len(CHIP)):
crpix1s.append(float(chips[str(int(CHIP[i]))]['CRPIX1']))
crpix2s.append(float(chips[str(int(CHIP[i]))]['CRPIX2']))
print len(Xpos), len(CRPIX1ZERO), len(crpix1s)
print Xpos[0:10], CRPIX1ZERO[0:10], crpix1s[0:10]
Xpos_ABS = scipy.array(Xpos) + CRPIX1ZERO - scipy.array(crpix1s)
Ypos_ABS = scipy.array(Ypos) + CRPIX2ZERO - scipy.array(crpix2s)
#print Xpos[0:10], Ypos[0:10], CHIP[0:10], crpix1s[0:10], crpix2s[0:10]
#print Xpos[-10:], Ypos[-10:], CHIP[-10:], crpix1s[-10:], crpix2s[-10:]
print 'BREAK'
print Xpos_ABS[0:10], table.field('Xpos_ABS')[0:10]#, Ypos[0:10], table.field('Xpos_ABS')[0:10],
print 'BREAK'
print Ypos_ABS[0:10], table.field('Ypos_ABS')[0:10]#, Ypos[0:10], table.field('Ypos_ABS')[0:10],
print SUPA, search_params['pasted_cat']
hdu[2].data.field('Xpos_ABS')[:] = Xpos_ABS
hdu[2].data.field('Ypos_ABS')[:] = Ypos_ABS
table = hdu[2].data
print 'BREAK'
print Xpos_ABS[0:10], table.field('Xpos_ABS')[0:10]#, Ypos[0:10], table.field('Xpos_ABS')[0:10],
print 'BREAK'
print Ypos_ABS[0:10], table.field('Ypos_ABS')[0:10]#, Ypos[0:10], table.field('Ypos_ABS')[0:10],
print SUPA, search_params['pasted_cat']
os.system('rm ' + search_params['pasted_cat'])
hdu.writeto(search_params['pasted_cat'])
save_exposure({'fixchips':1},SUPA,FLAT_TYPE)
def plot_chips(x1,y1):
x2 = x1
y2 = y1
#x1, y1 = length('SUPA0002955','DOMEFLAT')
#x2, y2 = length('SUPA0002956','DOMEFLAT')
import numpy, math, pyfits, os
import copy
from ppgplot import *
pgbeg("/XTERM",1,1)
pgiden()
pgpanl(1,1)
from Numeric import *
x = copy.copy(x1) #hdulist1["OBJECTS"].data.field(color1which)
y = copy.copy(y1) #hdulist1["OBJECTS"].data.field(compband+'mag') - data
plotx1 = array(copy.copy(x1))
ploty1 = array(copy.copy(y1))
plotx2 = array(copy.copy(x2))
ploty2 = array(copy.copy(y2))
x.sort()
y.sort()
pgswin(x[0]-200,x[-1]+200,y[0]-200,y[-1]+200)
#pylab.scatter(z,x)
pglab('Mag','Mag - Mag(Inst)')
#print plotx, ploty
pgpt(plotx1,ploty1,3)
pgpt(plotx2,ploty2,5)
pgbox()
pgend()
def sdss_coverage(SUPA,FLAT_TYPE):
import commands, string
dict = get_files(SUPA,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
print 'CRVAL1', search_params['CRVAL1'], search_params['CRVAL1'] == 'None'
#if str(search_params['CRVAL1']) == 'None':
# print search_params['FLAT_TYPE'], 'FLAT_TYPE'
if search_params['CRVAL1'] is None:
length_DEPRECATED(search_params['SUPA'],search_params['FLAT_TYPE'])
dict = get_files(SUPA,FLAT_TYPE)
search_params.update(dict)
print search_params['CRVAL1']
crval1 = float(search_params['CRVAL1'])
crval2 = float(search_params['CRVAL2'])
query = 'select ra, dec from star where ra between ' + str(crval1-0.1) + ' and ' + str(crval1+0.1) + ' and dec between ' + str(crval2-0.1) + ' and ' + str(crval2+0.1)
print query
import sqlcl
lines = sqlcl.query(query).readlines()
print lines
if len(lines) > 1: sdss_coverage=True
else: sdss_coverage=False
save_exposure({'sdss_coverage':sdss_coverage},SUPA,FLAT_TYPE)
return sdss_coverage
def sextract(SUPA,FLAT_TYPE):
import os, re, utilities, bashreader, sys, string
from copy import copy
from glob import glob
trial = True
dict = get_files(SUPA,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params['OBJNAME']}
subpath='/nfs/slac/g/ki/ki05/anja/SUBARU/'
search_params['CRPIX1ZERO'] = -999
search_params['CRPIX2ZERO'] = -999
print search_params
print SUPA, FLAT_TYPE, search_params['files']
kws = utilities.get_header_kw(search_params['files'][0],['PPRUN'])
print kws['PPRUN']
pprun = kws['PPRUN']
#fs = glob.glob(subpath+pprun+'/SCIENCE_DOMEFLAT*.tarz')
#if len(fs) > 0:
# os.system('tar xzvf ' + fs[0])
#fs = glob.glob(subpath+pprun+'/SCIENCE_SKYFLAT*.tarz')
#if len(fs) > 0:
# os.system('tar xzvf ' + fs[0])
search_params['files'].sort()
children = []
if 1:
for image in search_params['files']:
ROOT = re.split('\.',re.split('\/',image)[-1])[0]
BASE = re.split('O',ROOT)[0]
NUM = re.split('O',re.split('\_',ROOT)[1])[0]
print image, search_params['CRVAL1ASTROMETRY_'+NUM]
for image in search_params['files']:
print image
child = False
if not trial:
child = os.fork()
if child:
children.append(child)
if not child:
try:
params = copy(search_params)
ROOT = re.split('\.',re.split('\/',image)[-1])[0]
params['ROOT'] = ROOT
BASE = re.split('O',ROOT)[0]
params['BASE'] = BASE
NUM = re.split('O',re.split('\_',ROOT)[1])[0]
params['NUM'] = NUM
print NUM, BASE, ROOT
params['GAIN'] = 2.50 ## WARNING!!!!!!
print ROOT
finalflagim = "%(TEMPDIR)sflag_%(ROOT)s.fits" % params
res = re.split('SCIENCE',image)
res = re.split('/',res[0])
if res[-1]=='':res = res[:-1]
params['path'] = reduce(lambda x,y:x+'/'+y,res[:-1])
#params['fil_directory'] = res[-1]
weightim = "/%(path)s/%(fil_directory)s/WEIGHTS/%(ROOT)s.weight.fits" % params
#flagim = "/%(path)s/%(fil_directory)s/WEIGHTS/globalflag_%(NUM)s.fits" % params
#finalflagim = TEMPDIR + "flag_%(ROOT)s.fits" % params
params['finalflagim'] = weightim
im = "/%(path)s/%(fil_directory)s/SCIENCE/%(ROOT)s.fits" % params
crpix = utilities.get_header_kw(im,['CRPIX1','CRPIX2'])
#if search_params['SDSS_coverage'] == 'yes': catalog = 'SDSS-R6'
#else: catalog = '2MASS'
SDSS1 = "/%(path)s/%(fil_directory)s/SCIENCE/headers_scamp_SDSS-R6/%(BASE)s.head" % params
SDSS2 = "/%(path)s/%(fil_directory)s/SCIENCE/headers_scamp_SDSS-R6/%(BASE)sO*.head" % params
SDSS1 = SDSS1.replace('I_','_')
SDSS2 = SDSS2.replace('I_','_')
from glob import glob
print SDSS1, SDSS2
print glob(SDSS1), glob(SDSS2)
head = None
if len(glob(SDSS1)) > 0:
head = glob(SDSS1)[0]
elif len(glob(SDSS2)) > 0:
head = glob(SDSS2)[0]
''' see if image has been run through astrometry.net. if not, run it. '''
if True:
os.system('mkdir -p ' + search_params['TEMPDIR'])
if 0:
if 0: #not search_params.has_key('ASTROMETRYNET_' + NUM):
save_exposure({'ASTROMETRYNET_' + NUM:'None'},SUPA,FLAT_TYPE)
dict = get_files(dict['SUPA'],dict['FLAT_TYPE'])
search_params.update(dict)
if 0: #string.find(str(search_params['CRVAL1ASTROMETRY_' + NUM]),'None') != -1: #head is None:
save_exposure({'ASTROMETRYNET_' + NUM:'yes'},SUPA,FLAT_TYPE)
imtmp = "%(TEMPDIR)s/%(ROOT)s.tmp.fits" % params
imfix = "%(TEMPDIR)s/%(ROOT)s.fixwcs.fits" % params
imwcs = "%(TEMPDIR)s/%(ROOT)s.wcsfile" % params
command = "cp " + im + " " + imtmp
print command
utilities.run(command)
os.system('rm ' + imfix)
#command = '/nfs/slac/g/ki/ki04/pkelly/astrometry/bin//solve-field --cpulimit 60 --no-verify --no-plots --overwrite --scale-units arcsecperpix --scale-low ' + str(float(params['PIXSCALE'])-0.005) + ' --scale-high ' + str(float(params['PIXSCALE'])+0.005) + ' -N ' + imfix + ' ' + imtmp
command = astrom + ' --temp-dir ' + tmpdir + ' --cpulimit 100 --no-verify --no-plots --overwrite --scale-units arcsecperpix --scale-low ' + str(float(params['PIXSCALE'])-0.005) + ' --scale-high ' + str(float(params['PIXSCALE'])+0.005) + ' -N ' + imfix + ' ' + imtmp
print command
os.system(command)
os.system('rm ' + imtmp)
from glob import glob
if len(glob(imfix)):
command = 'imhead < ' + imfix + ' > ' + imwcs
print command
os.system(command)
hf = open(imwcs,'r').readlines()
hdict = {}
for line in hf:
import re
if string.find(line,'=') != -1:
res = re.split('=',line)
name = res[0].replace(' ','')
res = re.split('/',res[1])
value = res[0].replace(' ','')
print name, value
hdict[name] = value
''' now save the wcs '''
wcsdict = {}
import commands
out = commands.getoutput('gethead ' + imfix + ' CRPIX1 CRPIX2')
import re
res = re.split('\s+',out)
os.system('sethead ' + imfix + ' CRPIX1OLD=' + res[0])
os.system('sethead ' + imfix + ' CRPIX2OLD=' + res[1])
for name in ['CRVAL1','CRVAL2','CD1_1','CD1_2','CD2_1','CD2_2','CRPIX1','CRPIX2']:
print name + 'ASTROMETRY_' + NUM, hdict[name]
wcsdict[name + 'ASTROMETRY_' + NUM] = hdict[name]
save_exposure(wcsdict,SUPA,FLAT_TYPE)
dict = get_files(dict['SUPA'],dict['FLAT_TYPE'])
search_params.update(dict)
hdict = {}
if string.find(str(search_params['CRVAL1ASTROMETRY_' + NUM]),'None') == -1: #head is None:
for name in ['CRVAL1','CRVAL2','CD1_1','CD1_2','CD2_1','CD2_2','CRPIX1','CRPIX2']:
print name + 'ASTROMETRY', search_params[name+'ASTROMETRY_' + NUM]
hdict[name] = search_params[name+'ASTROMETRY_' + NUM]
print head
raw_input()
if head is not None:
''' if no solution from astrometry.net, use the scamp solution '''
hf = open(head,'r').readlines()
for line in hf:
import re
if string.find(line,'=') != -1:
res = re.split('=',line)
name = res[0].replace(' ','')
res = re.split('/',res[1])
value = res[0].replace(' ','')
print name, value
hdict[name] = value
imfix = "%(TEMPDIR)s/%(ROOT)s.fixwcs.fits" % params
print imfix
os.system('mkdir ' + search_params['TEMPDIR'])
command = "cp " + im + " " + imfix
print command
print 'copying file', im
utilities.run(command)
print 'finished copying'
import commands
out = commands.getoutput('gethead ' + imfix + ' CRPIX1 CRPIX2')
import re
res = re.split('\s+',out)
os.system('sethead ' + imfix + ' CRPIX1OLD=' + res[0])
os.system('sethead ' + imfix + ' CRPIX2OLD=' + res[1])
for name in ['CRVAL1','CRVAL2','CD1_1','CD1_2','CD2_1','CD2_2','CRPIX1','CRPIX2']:
command = 'sethead ' + imfix + ' ' + name + '=' + str(hdict[name])
print command
os.system(command)
else: sys.exit(0)
''' now run sextractor '''
if 1:
main_file = '%(TEMPDIR)s/%(ROOT)s.fixwcs.fits' % params
doubles_raw = [{'file_pattern':main_file,'im_type':''},]
#{'file_pattern':subpath+pprun+'/SCIENCE_DOMEFLAT*/'+BASE+'OC*.fits','im_type':'D'},
#{'file_pattern':subpath+pprun+'/SCIENCE_SKYFLAT*/'+BASE+'OC*.fits','im_type':'S'}]
#{'file_pattern':subpath+pprun+'/SCIENCE/OC_IMAGES/'+BASE+'OC*.fits','im_type':'OC'}
# ]
print doubles_raw
doubles_output = []
print doubles_raw
for double in doubles_raw:
file = glob(double['file_pattern'])
if len(file) > 0:
params.update(double)
params['double_cat'] = '%(TEMPDIR)s/%(ROOT)s.%(im_type)s.fixwcs.cat' % params
params['file_double'] = file[0]
#print params
#for par in ['fwhm','GAIN']:
# print par, type(params[par]), params[par]
command = "sex %(TEMPDIR)s%(ROOT)s.fixwcs.fits,%(file_double)s -c %(PHOTCONF)s/phot.conf.sex \
-PARAMETERS_NAME %(PHOTCONF)s/phot.param.sex \
-CATALOG_NAME %(double_cat)s \
-FILTER_NAME %(DATACONF)s/default.conv\
-FILTER Y \
-FLAG_TYPE MAX\
-FLAG_IMAGE ''\
-SEEING_FWHM %(fwhm).3f \
-DETECT_MINAREA 3 -DETECT_THRESH 3 -ANALYSIS_THRESH 3 \
-MAG_ZEROPOINT 27.0 \
-GAIN %(GAIN).3f \
-WEIGHT_IMAGE /%(path)s/%(fil_directory)s/WEIGHTS/%(ROOT)s.weight.fits\
-WEIGHT_TYPE MAP_WEIGHT" % params
#-CHECKIMAGE_TYPE BACKGROUND,APERTURES,SEGMENTATION\
#-CHECKIMAGE_NAME /%(path)s/%(fil_directory)s/PHOTOMETRY/coadd.background.fits,/%(path)s/%(fil_directory)s/PHOTOMETRY/coadd.apertures.fits,/%(path)s/%(fil_directory)s/PHOTOMETRY/coadd.segmentation.fits\
catname = "%(TEMPDIR)s/%(ROOT)s.cat" % params
print command
utilities.run(command,[catname])
command = 'ldacconv -b 1 -c R -i ' + params['double_cat'] + ' -o ' + params['double_cat'].replace('cat','rawconv')
print command
utilities.run(command)
#command = 'ldactoasc -b -q -i ' + params['double_cat'].replace('cat','rawconv') + ' -t OBJECTS\
# -k ALPHA_J2000 DELTA_J2000 > ' + params['double_cat'].replace('cat','pos')
#print command
#utilities.run(command)
#print 'mkreg.pl -c -rad 8 -xcol 0 -ycol 1 -wcs -colour green ' + params['double_cat'].replace('cat','pos')
#utilities.run(command)
#print params['double_cat'].replace('cat','pos')
# Xpos_ABS is difference of CRPIX and zero CRPIX
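# Hedged worked example (hypothetical numbers): with CRPIX1ZERO = 9000 and
# a chip whose CRPIX1 = 5000, an object at Xpos = 100 on that chip gets
# Xpos_ABS = 100 + (9000 - 5000) = 4100 in the mosaic frame.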
doubles_output.append({'cat':params['double_cat'].replace('cat','rawconv'),'im_type':double['im_type']})
print doubles_output
print '***********************************'
outfile = params['TEMPDIR'] + params['ROOT'] + '.conv'
combine_cats(doubles_output,outfile,search_params)
#outfile_field = params['TEMPDIR'] + params['ROOT'] + '.field'
#command = 'ldacdeltab -i ' + outfile + ' -t FIELDS -o ' + outfile_field
#utilities.run(command)
command = 'ldactoasc -b -q -i ' + outfile + ' -t OBJECTS\
-k ALPHA_J2000 DELTA_J2000 > ' + outfile.replace('conv','pos')
print command
utilities.run(command)
command = 'mkreg.pl -c -rad 8 -xcol 0 -ycol 1 -wcs -colour green ' + outfile.replace('conv','pos')
print command
utilities.run(command)
print outfile
command = 'ldaccalc -i ' + outfile + ' -o ' + params['TEMPDIR'] + params['ROOT'] + '.newpos -t OBJECTS -c "(Xpos + ' + str(float(search_params['CRPIX1ZERO']) - float(crpix['CRPIX1'])) + ');" -k FLOAT -n Xpos_ABS "" -c "(Ypos + ' + str(float(search_params['CRPIX2ZERO']) - float(crpix['CRPIX2'])) + ');" -k FLOAT -n Ypos_ABS "" -c "(Ypos*0 + ' + str(params['NUM']) + ');" -k FLOAT -n CHIP "" '
print command
utilities.run(command)
except:
traceback.print_exc(file=sys.stdout)
sys.exit(0)
if not trial:
sys.exit(0)
# print sys.exc_info()
# print 'finishing'
# sys.exit(0)
#sys.exit(0)
print children
for child in children:
print 'waiting for', child
os.waitpid(child,0)
print 'finished waiting'
pasted_cat = path + 'PHOTOMETRY/ILLUMINATION/' + 'pasted_' + SUPA + '_' + search_params['filter'] + '_' + str(search_params['ROTATION']) + '.cat'
print pasted_cat
os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/')
from glob import glob
outcat = search_params['TEMPDIR'] + 'tmppaste_' + SUPA + '.cat'
newposlist = glob(search_params['TEMPDIR'] + SUPA + '*newpos')
print search_params['TEMPDIR'] + SUPA + '*newpos'
if len(newposlist) > 1:
#command = 'ldacpaste -i ' + search_params['TEMPDIR'] + SUPA + '*newpos -o ' + pasted_cat
#print command
files = glob(search_params['TEMPDIR'] + SUPA + '*newpos')
print files, search_params['TEMPDIR'] + SUPA + '*newpos'
paste_cats(files,pasted_cat)
else:
command = 'cp ' + newposlist[0] + ' ' + pasted_cat
utilities.run(command)
save_exposure({'pasted_cat':pasted_cat,'resam':0},SUPA,FLAT_TYPE)
command = "rm -rf " + search_params['TEMPDIR']
os.system(command)
#fs = glob.glob(subpath+pprun+'/SCIENCE_DOMEFLAT*.tarz'.replace('.tarz',''))
#if len(fs) > 0:
# os.system('tar xzvf ' + fs[0])
#fs = glob.glob(subpath+pprun+'/SCIENCE_SKYFLAT*.tarz'.replace('.tarz',''))
#fs = glob.glob(subpath+pprun+'/SCIENCE_SKYFLAT*.tarz')
#if len(fs) > 0:
# os.system('tar xzvf ' + fs[0])
#return exposures, LENGTH1, LENGTH2
def get_sdss_obj(SUPA, FLAT_TYPE):
dict = get_files(SUPA,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
ROTATION = str(search_params['ROTATION']) #exposures[exposure]['keywords']['ROTATION']
import os
starcat ='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/PHOTOMETRY/sdssstar%(ROTATION)s.cat' % {'ROTATION':ROTATION,'OBJNAME':search_params['OBJNAME']}
galaxycat ='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/PHOTOMETRY/sdssgalaxy%(ROTATION)s.cat' % {'ROTATION':ROTATION,'OBJNAME':search_params['OBJNAME']}
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params['OBJNAME']}
illum_path='/nfs/slac/g/ki/ki05/anja/SUBARU/ILLUMINATION/'
#os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/')
os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/STAR/')
os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/GALAXY/')
from glob import glob
print starcat
for type,cat in [['star',starcat]]: #,['galaxy',galaxycat]]:
catalog = search_params['pasted_cat'] #exposures[exposure]['pasted_cat']
ramin,ramax, decmin, decmax = coordinate_limits(catalog)
limits = {'ramin':ramin-0.2,'ramax':ramax+0.2,'decmin':decmin-0.2,'decmax':decmax+0.2}
print ramin,ramax, decmin, decmax
if len(glob(cat)) == 0:
#os.system('rm ' + cat)
image = search_params['files'][0]
print image
import retrieve_test
retrieve_test.run(image,cat,type,limits)
save_exposure({'starcat':cat},SUPA,FLAT_TYPE)
def match_simple(SUPA,FLAT_TYPE):
dict = get_files(SUPA,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
ROTATION = str(search_params['ROTATION']) #exposures[exposure]['keywords']['ROTATION']
import os
starcat ='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/PHOTOMETRY/sdssstar%(ROTATION)s.cat' % {'ROTATION':ROTATION,'OBJNAME':search_params['OBJNAME']}
galaxycat ='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/PHOTOMETRY/sdssgalaxy%(ROTATION)s.cat' % {'ROTATION':ROTATION,'OBJNAME':search_params['OBJNAME']}
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params['OBJNAME']}
illum_path='/nfs/slac/g/ki/ki05/anja/SUBARU/ILLUMINATION/'
#os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/')
os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/STAR/')
os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/GALAXY/')
from glob import glob
print starcat
for type,cat in [['star',starcat]]: #,['galaxy',galaxycat]]:
catalog = search_params['pasted_cat'] #exposures[exposure]['pasted_cat']
ramin,ramax, decmin, decmax = coordinate_limits(catalog)
limits = {'ramin':ramin-0.2,'ramax':ramax+0.2,'decmin':decmin-0.2,'decmax':decmax+0.2}
print ramin,ramax, decmin, decmax
if 1: #len(glob(cat)) == 0:
#os.system('rm ' + cat)
image = search_params['files'][0]
print image
import retrieve_test
retrieve_test.run(image,cat,type,limits)
filter = search_params['filter'] #exposures[exposure]['keywords']['filter']
#GABODSID = exposures[exposure]['keywords']['GABODSID']
OBJECT = search_params['OBJECT'] #exposures[exposure]['keywords']['OBJECT']
print catalog
outcat = path + 'PHOTOMETRY/ILLUMINATION/' + type + '/' + 'matched_' + SUPA + '_' + filter + '_' + ROTATION + '_' + type + '.cat'
outcat_dir = path + 'PHOTOMETRY/ILLUMINATION/' + type + '/' + ROTATION + '/' + OBJECT + '/'
os.system('mkdir -p ' + outcat_dir)
file = 'matched_' + SUPA + '.cat'
linkdir = illum_path + '/' + filter + '/' + ROTATION + '/' + OBJECT + '/'
#outcatlink = linkdir + 'matched_' + exposure + '_' + OBJNAME + '_' + GABODSID + '.cat'
outcatlink = linkdir + 'matched_' + SUPA + '_' + search_params['OBJNAME'] + '_' + type + '.cat'
os.system('mkdir -p ' + linkdir)
os.system('rm ' + outcat)
command = 'match_simple.sh ' + catalog + ' ' + cat + ' ' + outcat
print command
os.system(command)
os.system('rm ' + outcatlink)
command = 'ln -s ' + outcat + ' ' + outcatlink
print command
os.system(command)
save_exposure({'matched_cat_' + type:outcat},SUPA,FLAT_TYPE)
print type, 'TYPE!'
print outcat, type
#exposures[exposure]['matched_cat_' + type] = outcat
#return exposures
def phot(SUPA,FLAT_TYPE):
dict = get_files(SUPA,FLAT_TYPE)
print dict.keys()
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
filter = dict['filter']
import utilities
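# per-filter photometric setup: the SDSS comparison band, the two colours
# used for colour terms, and the assumed atmospheric extinction coefficient
# (colour coefficients are left at zero here)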
info = {'B':{'filter':'g','color1':'gmr','color2':'umg','EXTCOEFF':-0.2104,'COLCOEFF':0.0},\
'W-J-B':{'filter':'g','color1':'gmr','color2':'umg','EXTCOEFF':-0.2104,'COLCOEFF':0.0},\
'W-J-V':{'filter':'g','color1':'gmr','color2':'rmi','EXTCOEFF':-0.1202,'COLCOEFF':0.0},\
'W-C-RC':{'filter':'r','color1':'rmi','color2':'gmr','EXTCOEFF':-0.0925,'COLCOEFF':0.0},\
'W-C-IC':{'filter':'i','color1':'imz','color2':'rmi','EXTCOEFF':-0.02728,'COLCOEFF':0.0},\
'W-S-Z+':{'filter':'z','color1':'imz','color2':'rmi','EXTCOEFF':0.0,'COLCOEFF':0.0}}
import mk_saturation_plot,os,re
os.environ['BONN_TARGET'] = search_params['OBJNAME']
os.environ['INSTRUMENT'] = 'SUBARU'
stars_0 = []
stars_90 = []
ROTATION = dict['ROTATION']
print ROTATION
import os
ppid = str(os.getppid())
from glob import glob
for im_type in ['']: #,'D','S']:
for type in ['star']: #,'galaxy']:
file = dict['matched_cat_' + type]
print file
if type == 'galaxy':
mag='MAG_AUTO' + im_type
magerr='MAGERR_AUTO' + im_type
class_star = "<0.9"
if type == 'star':
mag='MAG_APER2' + im_type
magerr='MAGERR_APER2' + im_type
class_star = ">0.9"
print 'filter', filter
os.environ['BONN_FILTER'] = filter
filt = re.split('_',filter)[0]
d = info[filt]
print file
utilities.run('ldacfilter -i ' + file + ' -o ' + search_params['TEMPDIR'] + 'good.stars' + ' -t PSSC\
-c "(Flag!=-99);"',['' + search_params['TEMPDIR'] + 'good.stars'])
utilities.run('ldacfilter -i ' + search_params['TEMPDIR'] + 'good.stars -o ' + search_params['TEMPDIR'] + 'good.colors -t PSSC\
-c "((((SEx_' + mag + '!=0 AND ' + d['color1'] + '<900) AND ' + d['color1'] + '!=0) AND ' + d['color1'] + '>-900) AND ' + d['color1'] + '!=0);"',['' + search_params['TEMPDIR'] + 'good.colors'])
print '' + search_params['TEMPDIR'] + 'good.colors'
utilities.run('ldaccalc -i ' + search_params['TEMPDIR'] + 'good.colors -t PSSC -c "(' + d['filter'] + 'mag - SEx_' + mag + ');" -k FLOAT -n magdiff "" -o ' + search_params['TEMPDIR'] + 'all.diffA.cat' ,[search_params['TEMPDIR'] + 'all.diffA.cat'] )
median = get_median('' + search_params['TEMPDIR'] + 'all.diffA.cat','magdiff')
utilities.run('ldacfilter -i ' + search_params['TEMPDIR'] + 'all.diffA.cat -o ' + search_params['TEMPDIR'] + 'all.diffB.cat -t PSSC\
-c "((magdiff > ' + str(median -1.25) + ') AND (magdiff < ' + str(median + 1.25) + '));"',['' + search_params['TEMPDIR'] + 'good.colors'])
utilities.run('ldaccalc -i ' + search_params['TEMPDIR'] + 'all.diffB.cat -t PSSC -c "(SEx_MaxVal + SEx_BackGr);" -k FLOAT -n MaxVal "" -o ' + search_params['TEMPDIR'] + 'all.diff.cat' ,['' + search_params['TEMPDIR'] + 'all.diff.cat'] )
command = 'ldactoasc -b -q -i ' + search_params['TEMPDIR'] + 'all.diff.cat -t PSSC -k SEx_' + mag + ' ' + d['filter'] + 'mag SEx_FLUX_RADIUS' + im_type + ' SEx_CLASS_STAR' + im_type + ' ' + d['filter'] + 'err ' + d['color1'] + ' MaxVal > ' + search_params['TEMPDIR'] + 'mk_sat_all'
#print command
#raw_input()
utilities.run(command,['' + search_params['TEMPDIR'] + 'mk_sat_all'] )
import commands
length = commands.getoutput('wc -l ' + search_params['TEMPDIR'] + 'mk_sat_all')
print 'TOTAL # of STARS:', length
cuts_to_make = ['MaxVal>27500.0','Clean!=1','SEx_IMAFLAGS_ISO'+im_type + '!=0','SEx_CLASS_STAR'+im_type+ class_star,'SEx_Flag'+im_type+'!=0',]
files = ['' + search_params['TEMPDIR'] + 'mk_sat_all']
titles = ['raw']
for cut in cuts_to_make:
#print 'making cut:', cut
cut_name = cut.replace('>','').replace('<','')
os.system('rm ' + search_params['TEMPDIR'] + cut_name)
command = 'ldacfilter -i ' + search_params['TEMPDIR'] + 'all.diff.cat -o ' + search_params['TEMPDIR'] + '' + cut_name + ' -t PSSC\
-c "(' + cut + ');"'
utilities.run(command,['' + search_params['TEMPDIR'] + '' + cut_name])
import glob
#print len(glob.glob('' + search_params['TEMPDIR'] + '' + cut_name)), glob.glob('' + search_params['TEMPDIR'] + '' + cut_name)
if len(glob.glob('' + search_params['TEMPDIR'] + '' + cut_name)) > 0:
utilities.run('ldactoasc -b -q -i ' + search_params['TEMPDIR'] + '' + cut_name + ' -t PSSC\
-k SEx_' + mag + ' ' + d['filter'] + 'mag SEx_FLUX_RADIUS SEx_CLASS_STAR ' + d['filter'] + 'err ' + d['color1'] + ' > ' + search_params['TEMPDIR'] + '' + cut_name + '.cat',['' + search_params['TEMPDIR'] + '' + cut_name + '.cat'])
length = commands.getoutput('wc -l ' + search_params['TEMPDIR'] + '' + cut_name + '.cat')
print 'TOTAL # of STARS CUT:', length
titles.append(cut_name)
files.append('' + search_params['TEMPDIR'] + '' + cut_name + '.cat')
#run('ldactoasc -b -q -i cutout1.' + ppid + ' -t PSSC\
# -k Ra Dec > ' + search_params['TEMPDIR'] + '' + outfile,['' + search_params['TEMPDIR'] + '' + outfile])
#run('mkreg.pl -c -rad 8 -xcol 0 -ycol 1 -wcs -colour ' + color + ' ' + search_params['TEMPDIR'] + '' + outfile)
utilities.run('ldacfilter -i ' + search_params['TEMPDIR'] + 'all.diff.cat -o ' + search_params['TEMPDIR'] + 'good.stars -t PSSC\
-c "(MaxVal<27500 AND SEx_IMAFLAGS_ISO'+im_type+'=0);"',['' + search_params['TEMPDIR'] + 'good.stars'])
#-c "((MaxVal<27500 AND SEx_CLASS_STAR'+im_type+class_star + ') AND SEx_IMAFLAGS_ISO'+im_type+'=0);"',['' + search_params['TEMPDIR'] + 'good.stars'])
#-c "(MaxVal<27500 AND SEx_IMAFLAGS_ISO'+im_type+'=0);"',['' + search_params['TEMPDIR'] + 'good.stars' + ppid])
utilities.run('ldactoasc -b -q -i ' + search_params['TEMPDIR'] + 'good.stars -t PSSC\
-k SEx_' + mag + ' ' + d['filter'] + 'mag SEx_FLUX_RADIUS' + im_type + ' SEx_CLASS_STAR'+im_type+' ' + d['filter'] + 'err ' + d['color1'] + ' > ' + search_params['TEMPDIR'] + 'mk_sat',['' + search_params['TEMPDIR'] + 'mk_sat'])
if len(glob.glob('' + search_params['TEMPDIR'] + 'mk_sat')) > 0:
files.append('' + search_params['TEMPDIR'] + 'mk_sat')
titles.append('filtered')
print files, titles
mk_saturation_plot.mk_saturation_all(files,titles,filter)
#cutout('' + search_params['TEMPDIR'] + 'good.stars' + ppid,mag)
print mag
val = raw_input("Look at the saturation plot?")
if len(val)>0:
if val[0] == 'y' or val[0] == 'Y':
mk_saturation_plot.mk_saturation(search_params['TEMPDIR'] + '/mk_sat',filter)
val = raw_input("Make a box?")
if len(val)>0:
if val[0] == 'y' or val[0] == 'Y':
mk_saturation_plot.use_box(filter)
lower_mag,upper_mag,lower_diff,upper_diff = re.split('\s+',open('box' + filter,'r').readlines()[0])
utilities.run('ldacfilter -i ' + search_params['TEMPDIR'] + '/good.stars -t PSSC\
-c "(((SEx_' + mag + '>' + lower_mag + ') AND (SEx_' + mag + '<' + upper_mag + ')) AND (magdiff>' + lower_diff + ')) AND (magdiff<' + upper_diff + ');"\
-o ' + search_params['TEMPDIR'] + '/filt.mag.new.cat',[search_params['TEMPDIR'] + '/filt.mag.new.cat'])
raw_input()
os.system('mv ' + search_params['TEMPDIR'] + '/filt.mag.new.cat ' + search_params['TEMPDIR'] + '/good.stars')
#val = []
#val = raw_input("Look at the saturation plot?")
#if len(val)>0:
# if val[0] == 'y' or val[0] == 'Y':
# mk_saturation_plot.mk_saturation('' + search_params['TEMPDIR'] + 'mk_sat' + ppid,filter)
# make stellar saturation plot
#lower_mag,upper_mag,lower_diff,upper_diff = re.split('\s+',open('box' + filter,'r').readlines()[0])
lower_mag = str(10)
upper_mag = str(14.0)
lower_diff = str(5)
upper_diff = str(9)
if type == 'star':
lower_mag = str(13.2)
utilities.run('ldactoasc -b -q -i ' + search_params['TEMPDIR'] + 'good.stars -t PSSC -k SEx_Xpos_ABS SEx_Ypos_ABS > ' + search_params['TEMPDIR'] + 'positions',[search_params['TEMPDIR'] + 'positions'] )
utilities.run('ldacaddkey -i ' + search_params['TEMPDIR'] + 'good.stars -o ' + search_params['TEMPDIR'] + 'filt.airmass.cat -t PSSC -k AIRMASS 0.0 FLOAT "" ',[search_params['TEMPDIR'] + 'filt.airmass.cat'] )
utilities.run('ldacfilter -i ' + search_params['TEMPDIR'] + 'filt.airmass.cat -o ' + search_params['TEMPDIR'] + 'filt.crit.cat -t PSSC\
-c "(((magdiff>-900) AND (magdiff<900)) AND SEx_' + mag + '!=0);"',['' + search_params['TEMPDIR'] + 'filt.crit.cat'])
utilities.run('ldacfilter -i ' + search_params['TEMPDIR'] + 'filt.crit.cat -o ' + search_params['TEMPDIR'] + 'all.colors.cat -t PSSC\
-c "(((' + d['color1'] + '<900 AND ' + d['color2'] + '<900) AND ' + d['color1'] + '>-900) AND ' + d['color2'] + '>-900);"',['' + search_params['TEMPDIR'] + 'all.colors.cat'])
utilities.run('ldactoasc -b -q -i ' + search_params['TEMPDIR'] + 'all.colors.cat -t PSSC -k SEx_' + mag + ' ' + d['filter'] + 'mag ' + d['color1'] + ' ' + d['color2'] + ' AIRMASS SEx_' + magerr + ' ' + d['filter'] + 'err SEx_Xpos_ABS SEx_Ypos_ABS > ' + search_params['TEMPDIR'] + 'input.asc' ,['' + search_params['TEMPDIR'] + 'input.asc'] )
import photo_abs_new
good = photo_abs_new.run_through('illumination',infile='' + search_params['TEMPDIR'] + 'input.asc',output='' + search_params['TEMPDIR'] + 'photo_res',extcoeff=d['color1'],sigmareject=6,step='STEP_1',bandcomp=d['filter'],color1which=d['color1'],color2which=d['color2'])
import astropy, astropy.io.fits as pyfits
cols = []
for key in ['corr_data','color1_good','color2_good','magErr_good','X_good','Y_good','airmass_good']:
cols.append(pyfits.Column(name=key, format='E',array=good[key]))
hdu = pyfits.PrimaryHDU()
hdulist = pyfits.HDUList([hdu])
print cols
tbhu = pyfits.BinTableHDU.from_columns(cols)
hdulist.append(tbhu)
hdulist[1].header['EXTNAME']='STDTAB'
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params['OBJNAME']}
outcat = path + 'PHOTOMETRY/ILLUMINATION/fit_' + im_type + '_' + search_params['SUPA'] + '_' + type + '.cat'
os.system('rm ' + outcat)
hdulist.writeto(outcat)
save_exposure({'fit_cat_' + im_type + '_' + type: outcat,'airmass_add':'yes'},SUPA,FLAT_TYPE)
save_fit(good['fits'],im_type,type,SUPA,FLAT_TYPE)
def nightrun():
import MySQLdb, sys, os, re, time, utilities, pyfits
from copy import copy
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
keystop = ['PPRUN']
list = reduce(lambda x,y: x + ',' + y, keystop)
command="SELECT " + list + " from illumination_db where zp_star_ is not null and PPRUN!='KEY_N/A' GROUP BY PPRUN"
print command
c.execute(command)
results=c.fetchall()
db_keys = describe_db(c)
h = []
for line in results:
dtop = {}
for i in range(len(keystop)):
dtop[keystop[i]] = line[i]
directory = 'run_' + dtop['PPRUN']
os.system('mkdir ' + os.environ['sne'] + '/plots/' + directory )
os.system('rm ' + os.environ['sne'] + '/plots/' + directory + '/*')
keys = ['OBJNAME','ROTATION']
list = reduce(lambda x,y: x + ',' + y, keys)
command="SELECT " + list + " from illumination_db where zp_star_ is not null and PPRUN='" + dtop['PPRUN'] + "' GROUP BY OBJNAME,ROTATION"
print command
c.execute(command)
results=c.fetchall()
db_keys = describe_db(c)
h = []
for line in results:
d = {}
for i in range(len(keys)):
d[keys[i]] = line[i]
if 1:
#print d
if 1:
crit = reduce(lambda x,y: x + ' AND ' + y,[str(y) + "='" + str(d[y]) + "'" for y in keys])
file = directory + '/' + reduce(lambda x,y: x + 'AND' + y,[str(y)[0:4] + "_" + str(d[y]) for y in keys])
#print crit
command = "SELECT * from illumination_db where zp_star_ is not null and " + crit
#print command
c.execute(command)
results = c.fetchall()
#print results
fit_files = []
for j in range(len(results)):
dict = {}
for i in range(len(results[j])):
dict[db_keys[i]] = results[j][i]
#print dict['SUPA'], dict['OBJNAME'], dict['pasted_cat'], dict['matched_cat_star']
fit_files.append(dict['fit_cat__star'])
#print fit_files
dict = get_files(dict['SUPA'],dict['FLAT_TYPE'])
#print dict.keys()
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
from copy import copy
import photo_abs_new
reload(photo_abs_new)
files = reduce(lambda x,y: x + ' ' + y,fit_files)
#print files
tempfile = '' + search_params['TEMPDIR'] + 'spit'
command = 'ldacpaste -i ' + files + ' -t STDTAB -o ' + tempfile
print command
utilities.run(command)
hdulist = pyfits.open(tempfile)
args = {}
for column in hdulist["STDTAB"].columns:
args[column.name] = hdulist["STDTAB"].data.field(column.name)
photo_abs_new.calcDataIllum(file,search_params['LENGTH1'], search_params['LENGTH2'], 1000, args['corr_data'], args['airmass_good'], args['color1_good'], args['color2_good'], args['magErr_good'], args['X_good'], args['Y_good'],rot=0)
#except: print 'failed'
def auto_print():
import MySQLdb, sys, os, re, time, utilities, pyfits
from copy import copy
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
keys = ['FILTER','ROTATION']
list = reduce(lambda x,y: x + ',' + y, keys)
command="SELECT " + list + " from illumination_db where zp_star_ is not null and PPRUN!='KEY_N/A' and good_stars_star_ > 400 GROUP BY "+list
print command
c.execute(command)
results=c.fetchall()
db_keys = describe_db(c)
h = []
for line in results:
d = {}
for i in range(len(keys)):
d[keys[i]] = line[i]
if 1:
print d
if 1:
crit = reduce(lambda x,y: x + ' AND ' + y,[str(y) + "='" + str(d[y]) + "'" for y in keys])
file = 'filt_' + reduce(lambda x,y: x + 'AND' + y,[str(y)[0:4] + "_" + str(d[y]) for y in keys])
print crit
command = "SELECT * from illumination_db where zp_star_ is not null and " + crit
print command
c.execute(command)
results = c.fetchall()
print results
fit_files = []
for j in range(len(results)):
dict = {}
for i in range(len(results[j])):
dict[db_keys[i]] = results[j][i]
print dict['SUPA'], dict['OBJNAME'], dict['pasted_cat'], dict['matched_cat_star']
fit_files.append(dict['fit_cat__star'])
print fit_files
dict = get_files(dict['SUPA'],dict['FLAT_TYPE'])
print dict.keys()
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
from copy import copy
import photo_abs_new
reload(photo_abs_new)
files = reduce(lambda x,y: x + ' ' + y,fit_files)
print files
tempfile = '' + search_params['TEMPDIR'] + 'spit'
command = 'ldacpaste -i ' + files + ' -t STDTAB -o ' + tempfile
print command
utilities.run(command)
hdulist = pyfits.open(tempfile)
args = {}
for column in hdulist["STDTAB"].columns:
args[column.name] = hdulist["STDTAB"].data.field(column.name)
photo_abs_new.calcDataIllum(file,search_params['LENGTH1'], search_params['LENGTH2'], 1000, args['corr_data'], args['airmass_good'], args['color1_good'], args['color2_good'], args['magErr_good'], args['X_good'], args['Y_good'],rot=0)
#except: print 'failed'
def describe_db(c,db='illumination_db'):
command = "DESCRIBE " + db
print command
c.execute(command)
results = c.fetchall()
keys = []
for line in results:
keys.append(line[0])
return keys
def printer():
import MySQLdb, sys, os, re, time, utilities, pyfits
from copy import copy
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
if 1: #for set in [{'OBJNAME':'HDFN', 'filters':['W-J-B','W-J-V','W-C-RC','W-C-IC','W-S-Z+']},{'OBJNAME':'MACS2243-09', 'filters':['W-J-V','W-C-RC','W-C-IC','W-S-Z+']},{'OBJNAME':'A2219', 'filters':['W-J-B','W-J-V','W-C-RC']}]:
#OBJNAME = set['OBJNAME']
if 1: #for filter in set['filters']:
if 1: #try:
keys = describe_db(c)
print keys
OBJNAME = 'HDFN'
filter = 'W-C-ICSF'
ROTATION = 1
command = "select * from illumination_db where OBJNAME='" + OBJNAME + "' and filter='" + filter + "' and fit_cat_galaxy is not null and crfixed='third' and good_stars_star is not null and good_stars_star>10 and ROTATION=" + str(ROTATION)
command = "select * from illumination_db where SUPA='SUPA0011022' and zp_err_galaxy_D is not null"
#command = "select * from illumination_db where OBJNAME='" + OBJNAME + "' and filter='" + filter + "' and fit_cat_galaxy is not null and crfixed='third' and ROTATION=" + str(ROTATION) + ' and good_stars_star is not null and good_stars_star>10'
command = "SELECT * from illumination_db where zp_star_ is not null and ROTATION='0'" # where OBJNAME='HDFN' and filter='W-J-V' and ROTATION=0"
print command
c.execute(command)
results = c.fetchall()
fit_files = []
for j in range(len(results)):
dict = {}
for i in range(len(results[j])):
dict[keys[i]] = results[j][i]
print dict['SUPA'], dict['OBJNAME'], dict['pasted_cat'], dict['matched_cat_star']
fit_files.append(dict['fit_cat__star'])
print fit_files
dict = get_files(dict['SUPA'],dict['FLAT_TYPE'])
print dict.keys()
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
from copy import copy
import photo_abs_new
reload(photo_abs_new)
files = reduce(lambda x,y: x + ' ' + y,fit_files)
print files
tempfile = '' + search_params['TEMPDIR'] + 'spit'
command = 'ldacpaste -i ' + files + ' -t STDTAB -o ' + tempfile
print command
utilities.run(command)
hdulist = pyfits.open(tempfile)
args = {}
for column in hdulist["STDTAB"].columns:
args[column.name] = hdulist["STDTAB"].data.field(column.name)
file = OBJNAME + '_' + filter + '_' + str(ROTATION)
file = raw_input('filename?')
photo_abs_new.calcDataIllum(file,search_params['LENGTH1'], search_params['LENGTH2'], 1000, args['corr_data'], args['airmass_good'], args['color1_good'], args['color2_good'], args['magErr_good'], args['X_good'], args['Y_good'],rot=0)
#except: print 'failed'
#filter = 'W-C-IC'
import pickle
#filters = ['W-J-B','W-J-V','W-C-RC','W-C-IC','W-S-Z+']
#for filter in filters:
# exposures_zero = {}
# exposures_one = {}
# print '$$$$$'
# print 'separating into different camera rotations'
# for exposure in exposures.keys():
# print exposure,exposures[exposure]['keywords']['ROTATION']
# if int(exposures[exposure]['keywords']['ROTATION']) == 1:
# exposures_one[exposure] = exposures[exposure]
# if int(exposures[exposure]['keywords']['ROTATION']) == 0:
# exposures_zero[exposure] = exposures[exposure]
if 0:
reopen = 0
save = 0
if reopen:
f = open('' + search_params['TEMPDIR'] + 'tmppickle' + OBJNAME + filter,'r')
m = pickle.Unpickler(f)
exposures, LENGTH1, LENGTH2 = m.load()
print image.latest
if 1: images = gather_exposures(filter,OBJNAME)
print images
''' strip down exposure list '''
for key in exposures.keys():
print exposures[key]['images']
for image in exposures:
if 1: image.find_seeing(exposures) # save seeing info?
if 1: image.sextract(exposures)
if 1: image.match_simple(exposures,OBJNAME)
if 1: image.phot(exposures,filter,type,LENGTH1,LENGTH2)
if save:
f = open('' + search_params['TEMPDIR'] + 'tmppickle' + OBJNAME + filter,'w')
m = pickle.Pickler(f)
m.dump([exposures,LENGTH1,LENGTH2])
f.close()
def get_sdss(dict):
import MySQLdb, sys, os, re, time, utilities, pyfits
from copy import copy
import os
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
starcat ='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/PHOTOMETRY/sdssstar%(ROTATION)s.cat' % {'ROTATION':search_params['ROTATION'],'OBJNAME':search_params['OBJNAME']}
galaxycat ='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/PHOTOMETRY/sdssgalaxy%(ROTATION)s.cat' % {'ROTATION':search_params['ROTATION'],'OBJNAME':search_params['OBJNAME']}
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params['OBJNAME']}
illum_path='/nfs/slac/g/ki/ki05/anja/SUBARU/ILLUMINATION/'
#os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/')
os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/STAR/')
os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/GALAXY/')
from glob import glob
print starcat
for type,cat in [['star',starcat]]: #,['galaxy',galaxycat]]:
catalog = search_params['pasted_cat'] #exposures[exposure]['pasted_cat']
ramin,ramax, decmin, decmax = coordinate_limits(catalog)
limits = {'ramin':ramin-0.2,'ramax':ramax+0.2,'decmin':decmin-0.2,'decmax':decmax+0.2}
print ramin,ramax, decmin, decmax
if 1: #len(glob(cat)) == 0:
#os.system('rm ' + cat)
image = search_params['files'][0]
print image
import retrieve_test
retrieve_test.run(image,cat,type,limits)
return starcat
def match_OBJNAME(SDSS=False):
import MySQLdb, sys, os, re, time, utilities, pyfits
from copy import copy
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
db_keys = describe_db(c)
keystop = ['PPRUN','ROTATION','OBJNAME']
list = reduce(lambda x,y: x + ',' + y, keystop)
command="SELECT * from illumination_db where zp_star_ is not null and PPRUN='2002-06-04_W-J-V' and OBJECT='MACSJ1423.8' GROUP BY OBJNAME,ROTATION"
#command="SELECT * from illumination_db where OBJNAME like '%2243%' and filter='W-J-V' GROUP BY OBJNAME,pprun,filter "
#command="SELECT * from illumination_db where file not like '%CALIB%' and OBJECT like '%1423%' GROUP BY OBJNAME,pprun,filter"
command="SELECT * from illumination_db where file not like '%CALIB%' GROUP BY OBJNAME,pprun,filter"
print command
c.execute(command)
results=c.fetchall()
for line in results:
try:
dtop = {}
for i in range(len(db_keys)):
dtop[db_keys[i]] = str(line[i])
res = re.split('\/',dtop['file'])
for j in range(len(res)):
if res[j] == 'SUBARU':
break
CLUSTER = res[j+1]
print CLUSTER
FILTER = dtop['filter']
PPRUN = dtop['PPRUN']
save_fit({'PPRUN':PPRUN,'CLUSTER':CLUSTER,'FILTER':FILTER})
keys = ['SUPA','OBJNAME','ROTATION','PPRUN','pasted_cat','filter','ROTATION','files']
list = reduce(lambda x,y: x + ',' + y, keys)
#command="SELECT * from illumination_db where zp_star_ is not null and OBJNAME='"+dtop['OBJNAME'] + "' and PPRUN='" + dtop['PPRUN'] + "'"#+ "' GROUP BY OBJNAME,ROTATION"
command="SELECT * from illumination_db where CLUSTER='" + dtop['OBJNAME'] + "' and PPRUN='" + dtop['PPRUN'] + "'"#+ "' GROUP BY OBJNAME,ROTATION"
print command
c.execute(command)
results=c.fetchall()
print results
#raw_input()
field = []
info = []
for line in results:
d = {}
for i in range(len(db_keys)):
d[db_keys[i]] = str(line[i])
ana = '' #raw_input('analyze ' + d['SUPA'] + '?')
if len(ana) > 0:
if ana[0] == 'y':
analyze(d['SUPA'],d['FLAT_TYPE'])
key = str(int(float(d['ROTATION']))) + '$' + d['SUPA'] + '$'
field.append({'key':key,'pasted_cat':d['pasted_cat']})
info.append([d['ROTATION'],d['SUPA'],d['OBJNAME']])
if 0:
linear_fit(CLUSTER,FILTER,PPRUN)
#if len(results) > 0:
if 1:
if d['CRVAL1'] == 'None':
length(d['SUPA'],d['FLAT_TYPE'])
cov = sdss_coverage(d['SUPA'],d['FLAT_TYPE'])
''' get SDSS matched stars, use photometric calibration to remove color term '''
if cov: #SDSS:
if 1: #d['starcat'] == 'None':
get_sdss_obj(d['SUPA'],d['FLAT_TYPE'])
if d['sdssmatch'] == 'None':
apply_photometric_calibration(d['SUPA'],d['FLAT_TYPE'])
print 'calibration done'
d = get_files(d['SUPA'],d['FLAT_TYPE'])
print d
#a = raw_input('match?')
#if 1: #len(a) > 0:
# if 1: #a[0] == 'y':
#sdss = get_sdss(d)
print field
input = [[x['pasted_cat'],x['key']] for x in field]
print input
print len(input)
if len(input) > 6:
input_short = []
rot0 = filter(lambda x:x[1][0]=='0',input)[0:3]
rot1 = filter(lambda x:x[1][0]=='1',input)[0:3]
input = rot0 + rot1
print 'new', input
print input
print len(input)
#input = add_correction(input)
print input
if cov:
input.append([d['sdssmatch'],'SDSS'])
print input,cov
match_many(input)
linear_fit(CLUSTER,FILTER,PPRUN)
#script = reduce(lambda x,y: x + ' ' + y,[x['pasted_cat'] + ' ' + x['key'] for x in field])
print '\n\nDONE'
except KeyboardInterrupt:
raise
except:
print 'fail'
ppid_loc = str(os.getppid())
print sys.exc_info()
def add_gradient(cat_list):
import astropy, astropy.io.fits as pyfits, os
cat_grads = []
for cat in cat_list:
print cat
p = pyfits.open(cat[0])
tab = p["OBJECTS"].data
print tab.field('MAG_AUTO')[0:10]
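# inject an artificial linear gradient of 5./10000. mag per pixel in X
# (apparently a known test pattern for checking the illumination fit)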
tab.field('MAG_AUTO')[:] = tab.field('MAG_AUTO') + 5./10000.*tab.field('Xpos_ABS')
new_name = cat[0].replace('.cat','.gradient.cat')
os.system('rm ' + new_name)
p.writeto(new_name)
cat_grads.append([new_name,cat[1]])
return cat_grads
def add_correction(cat_list):
import astropy, astropy.io.fits as pyfits, os
cat_grads = []
EXPS = getTableInfo()
cheby_x = [{'n':'0x','f':lambda x,y:1.},{'n':'1x','f':lambda x,y:x},{'n':'2x','f':lambda x,y:2*x**2-1},{'n':'3x','f':lambda x,y:4*x**3.-3*x}]
cheby_y = [{'n':'0y','f':lambda x,y:1.},{'n':'1y','f':lambda x,y:y},{'n':'2y','f':lambda x,y:2*y**2-1},{'n':'3y','f':lambda x,y:4*y**3.-3*y}]
#func = lambda x,y: [cheby_x_dict[f[0:2]](x,y)*cheby_y_dict[f[2:]](x,y) for f in fitvars]
for cat in cat_list:
for ROT in EXPS.keys():
for SUPA in EXPS[ROT]:
import re
print SUPA, cat
res = re.split('\$',cat[1])
file = res[1]
print file, cat
if file == SUPA: rotation = ROT
import pickle
f=open('/tmp/fitvars' + rotation,'r')
m=pickle.Unpickler(f)
fitvars=m.load()
cheby_terms = []
for tx in cheby_x:
for ty in cheby_y:
if fitvars.has_key(tx['n']+ty['n']): # not ((tx['n'] == '0x' and ty['n'] == '0y')): # or (tx['n'] == '0x' and ty['n'] == '1y') or (tx['n'] == '1x' and ty['n'] == '0y')) :
cheby_terms.append({'n':tx['n'] + ty['n'],'fx':tx['f'],'fy':ty['f']})
print EXPS
print cat
p = pyfits.open(cat[0])
tab = p["OBJECTS"].data
print tab.field('MAG_AUTO')[0:10]
x = coord_conv_x(tab.field('Xpos_ABS'))
y = coord_conv_y(tab.field('Ypos_ABS'))
epsilon = 0
for term in cheby_terms:
epsilon += fitvars[term['n']]*term['fx'](x,y)*term['fy'](x,y)
print epsilon[0:20]
tab.field('MAG_AUTO')[:] = tab.field('MAG_AUTO')[:] - epsilon
print tab.field('MAG_AUTO')[0:20]
new_name = cat[0].replace('.cat','.gradient.cat')
os.system('rm ' + new_name)
p.writeto(new_name)
cat_grads.append([new_name,cat[1]])
return cat_grads
def make_ssc_config(list):
ofile = '/tmp/tmp.cat'
out = open('/tmp/tmp.ssc','w')
import os, string, re
keys = []
i = -1
for file_name,prefix in list:
i += 1
print file_name
os.system('ldacdesc -t OBJECTS -i ' + file_name + ' > ' + ofile)
file = open(ofile,'r').readlines()
for line in file:
if string.find(line,"Key name") != -1:
red = re.split('\.+',line)
key = red[1].replace(' ','').replace('\n','')
out_key = prefix + key
out.write("COL_NAME = " + out_key + '\nCOL_INPUT = ' + key + '\nCOL_MERGE = AVE_REG\nCOL_CHAN = ' + str(i) + "\n#\n")
#print key
keys.append(key)
out.close()
def make_ssc_config_few(list):
ofile = '/tmp/tmp.cat'
out = open('/tmp/tmp.ssc','w')
import os, string, re
key_list = ['Flag','MAG_AUTO','MAGERR_AUTO','MAG_APER2','MAGERR_APER2','Xpos_ABS','Ypos_ABS','CLASS_STAR','MaxVal','BackGr','stdMag_corr','stdMagErr_corr','stdMagColor_corr','stdMagClean_corr']
keys = []
i = -1
for file_name,prefix in list:
i += 1
print file_name
os.system('ldacdesc -t OBJECTS -i ' + file_name + ' > ' + ofile)
file = open(ofile,'r').readlines()
for line in file:
if string.find(line,"Key name") != -1 :
red = re.split('\.+',line)
key = red[1].replace(' ','').replace('\n','')
out_key = prefix + key
if reduce(lambda x,y: x+ y, [string.find(out_key,k)!=-1 for k in key_list]):
out.write("COL_NAME = " + out_key + '\nCOL_INPUT = ' + key + '\nCOL_MERGE = AVE_REG\nCOL_CHAN = ' + str(i) + "\n#\n")
#print key
keys.append(key)
out.close()
def make_ssc_config_colors(list):
ofile = '/tmp/tmp.cat'
out = open('/tmp/tmp.ssc','w')
import os, string, re
keys = []
i = -1
for file_name,prefix in list:
i += 1
print file_name
os.system('ldacdesc -t OBJECTS -i ' + file_name + ' > ' + ofile)
file = open(ofile,'r').readlines()
for line in file:
if string.find(line,"Key name") != -1:
red = re.split('\.+',line)
key = red[1].replace(' ','').replace('\n','')
out_key = key + '_' + prefix
out.write("COL_NAME = " + out_key + '\nCOL_INPUT = ' + key + '\nCOL_MERGE = AVE_REG\nCOL_CHAN = ' + str(i) + "\n#\n")
#print key
keys.append(key)
out.close()
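# For reference, each block written to /tmp/tmp.ssc by the three helpers
# above has the form (one block per key and input catalogue/channel):
#   COL_NAME = <output key>
#   COL_INPUT = <key>
#   COL_MERGE = AVE_REG
#   COL_CHAN = <channel index>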
def threesec():
list = [['/nfs/slac/g/ki/ki05/anja/SUBARU/MACS0417-11/PHOTOMETRY/ILLUMINATION/pasted_SUPA0105807_W-C-RC_2009-01-23_CALIB_0.0.cat','W-C-RC'],['/nfs/slac/g/ki/ki05/anja/SUBARU/MACS0417-11/PHOTOMETRY/ILLUMINATION/pasted_SUPA0105787_W-J-V_2009-01-23_CALIB_0.0.cat','W-J-V'],['/nfs/slac/g/ki/ki05/anja/SUBARU/MACS0417-11/PHOTOMETRY/ILLUMINATION/pasted_SUPA0050786_W-C-IC_2006-12-21_CALIB_0.0.cat','W-C-IC']]
match_many(list,True)
def match_many(list,color=False):
if color:
make_ssc_config_colors(list)
print color
else:
make_ssc_config_few(list)
import os
files = []
for file,prefix in list:
print file
command = 'ldacaddkey -i %(inputcat)s -t OBJECTS -o %(outputcat)s -k A_WCS_assoc 0.0003 FLOAT "" \
B_WCS_assoc 0.0003 FLOAT "" \
Theta_assoc 0.0 FLOAT "" \
Flag_assoc 0 SHORT "" ' % {'inputcat':file,'outputcat':file + '.assoc1'}
os.system(command)
#command = 'ldacrenkey -i %(inputcat)s -o %(outputcat)s -k ALPHA_J2000 Ra DELTA_J2000 Dec' % {'inputcat':file + '.assoc1','outputcat':file+'.assoc2'}
#os.system(command)
files.append(file+'.assoc1')
import re
files_input = reduce(lambda x,y:x + ' ' + y,files)
os.system('mkdir /usr/work/pkelly/assoc/')
files_output = reduce(lambda x,y:x + ' ' + y,['/usr/work/pkelly/assoc/'+re.split('\/',z)[-1] +'.assd' for z in files])
print files
print files_input, files_output
command = ('associate -i %(inputcats)s -o %(outputcats)s -t OBJECTS -c ' + os.environ['bonn'] + '/photconf/fullphotom.alpha.associate') % {'inputcats':files_input,'outputcats':files_output}
print command
os.system(command)
outputcat = '/tmp/final.cat'
command = 'make_ssc -i %(inputcats)s \
-o %(outputcat)s\
-t OBJECTS -c /tmp/tmp.ssc ' % {'inputcats':files_output,'outputcat':outputcat}
os.system(command)
def match_inside(SUPA1,SUPA2,FLAT_TYPE):
dict1 = get_files(SUPA1,FLAT_TYPE)
search_params1 = initialize(dict1['filter'],dict1['OBJNAME'])
search_params1.update(dict1)
dict2 = get_files(SUPA2,FLAT_TYPE)
search_params2 = initialize(dict2['filter'],dict2['OBJNAME'])
search_params2.update(dict2)
import os
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params1['OBJNAME']}
illum_path='/nfs/slac/g/ki/ki05/anja/SUBARU/ILLUMINATION/'
#os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/')
os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/SELF/')
from glob import glob
catalog1 = search_params1['pasted_cat']
catalog2 = search_params2['pasted_cat']
#os.system('ldacrentab -i ' + catalog2 + ' -t OBJECTS STDTAB -o ' + catalog2.replace('cat','std.cat'))
filter = search_params1['filter'] #exposures[exposure]['keywords']['filter']
OBJECT = search_params1['OBJECT'] #exposures[exposure]['keywords']['OBJECT']
outcat = path + 'PHOTOMETRY/ILLUMINATION/SELF/matched_' + SUPA1 + '_' + filter + '_' + '_self.cat'
file = 'matched_' + SUPA1 + '.cat'
os.system('rm ' + outcat)
command = 'match_simple_cats.sh ' + catalog1 + ' ' + catalog2 + ' ' + outcat
print command
os.system(command)
save_exposure({'matched_cat_self':outcat},SUPA1,FLAT_TYPE)
print outcat
def getTableInfo():
import astropy, astropy.io.fits as pyfits, sys, os, re, string, copy , string
p = pyfits.open('/tmp/final.cat')
tbdata = p[1].data
types = []
ROTS = {}
KEYS = {}
for column in p[1].columns:
if string.find(column.name,'$') != -1:
print column
res = re.split('\$',column.name)
ROT = res[0]
IMAGE = res[1]
KEY = res[2]
if not ROTS.has_key(ROT):
ROTS[ROT] = []
if not len(filter(lambda x:x==IMAGE,ROTS[ROT])) and IMAGE!='SUPA0011082':
ROTS[ROT].append(IMAGE)
return ROTS
def diffCalcNew():
import astropy, astropy.io.fits as pyfits, sys, os, re, string, copy , string
p = pyfits.open('/tmp/final.cat')
tbdata = p[1].data
types = []
ROTS = {}
KEYS = {}
for column in p[1].columns:
if string.find(column.name,'$') != -1:
print column
res = re.split('\$',column.name)
ROT = res[0]
IMAGE = res[1]
KEY = res[2]
if not ROTS.has_key(ROT):
ROTS[ROT] = []
if not len(filter(lambda x:x==IMAGE,ROTS[ROT])):
ROTS[ROT].append(IMAGE)
print ROTS
#good = 0
#for i in range(len(tbdata)):
# array = []
# for y in ROTS[ROT]:
# array += [tbdata.field(ROT+'$'+y+'$CLASS_STAR')[i] for y in ROTS[ROT]]
# array.sort()
# if array[-1]>0.9 and array[-2]>0.9:
# good += 1
#print good, len(tbdata)
#raw_input()
def starConstruction(EXPS):
''' the top two most star-like objects have CLASS_STAR>0.9 and, for each rotation, their magnitudes differ by less than 0.01 '''
import astropy, astropy.io.fits as pyfits, sys, os, re, string, copy , string, scipy
p = pyfits.open('/tmp/final.cat')
table = p[1].data
from copy import copy
w = []
for ROT in EXPS.keys():
for y in EXPS[ROT]:
w.append(copy(table.field(ROT+'$'+y+'$MAG_AUTO')))
medians = []
stds = []
for i in range(len(w[0])):
non_zero = []
for j in range(len(w)):
if w[j][i] != 0:
non_zero.append(w[j][i])
if len(non_zero) != 0:
medians.append(float(scipy.median(non_zero)))
stds.append(float(scipy.std(non_zero)))
else:
medians.append(float(-99))
stds.append(99)
print medians[0:99]
tnew = mk_tab([[medians,'median'],[stds,'std']])
tall = merge(tnew,p)
print 'done merging'
def selectGoodStars(EXPS):
''' the top two most star-like objects have CLASS_STAR>0.9 and, for each rotation, their magnitudes differ by less than 0.01 '''
import astropy, astropy.io.fits as pyfits, sys, os, re, string, copy , string, scipy
p = pyfits.open('/tmp/final.cat')
#print p[1].columns
#raw_input()
table = p[1].data
star_good = [] #= scipy.zeros(len(table))
supas = []
from copy import copy
''' if there is an image that does not match, throw it out '''
Finished = False
while not Finished:
temp = copy(table)
for ROT in EXPS.keys():
for y in EXPS[ROT]:
mask = temp.field(ROT+'$'+y+'$MAG_AUTO') != 0.0
good_entries = temp[mask]
temp = good_entries
print len(good_entries.field(ROT+'$'+y+'$MAG_AUTO'))
mask = temp.field(ROT+'$'+y+'$MAG_AUTO') < 27
good_entries = temp[mask]
temp = good_entries
print len(good_entries.field(ROT+'$'+y+'$MAG_AUTO'))
mask = 0 < temp.field(ROT+'$'+y+'$MAG_AUTO')
good_entries = temp[mask]
temp = good_entries
print len(good_entries.field(ROT+'$'+y+'$MAG_AUTO'))
print ROT,y, temp.field(ROT+'$'+y+'$MaxVal')[0:10],temp.field(ROT+'$'+y+'$BackGr')[0:10]
mask = (temp.field(ROT+'$'+y+'$MaxVal') + temp.field(ROT+'$'+y+'$BackGr')) < 26000
good_entries = temp[mask]
temp = good_entries
good_number = len(good_entries.field(ROT+'$'+y+'$MAG_AUTO'))
print ROT,y, good_number , EXPS
if good_number == 0:
TEMP = {}
for ROTTEMP in EXPS.keys():
TEMP[ROTTEMP] = []
for yTEMP in EXPS[ROTTEMP]:
if y != yTEMP:
TEMP[ROTTEMP].append(yTEMP)
EXPS = TEMP
break
if good_number != 0:
Finished = True
print len(temp), 'temp'
zps = {}
print EXPS.keys(), EXPS
for ROT in EXPS.keys():
for y in EXPS[ROT]:
s = good_entries.field(ROT+'$'+y+'$MAG_AUTO').sum()
print s
print s/len(good_entries)
zps[y] = s/len(good_entries)
print zps
from copy import copy
tab = {}
for ROT in EXPS.keys():
for y in EXPS[ROT]:
for key in [ROT+'$'+y+'$Xpos_ABS',ROT+'$'+y+'$Ypos_ABS',ROT+'$'+y+'$MAG_AUTO',ROT+'$'+y+'$MAGERR_AUTO',ROT+'$'+y+'$MaxVal',ROT+'$'+y+'$BackGr',ROT+'$'+y+'$CLASS_STAR',ROT+'$'+y+'$Flag','SDSSstdMag_corr','SDSSstdMagErr_corr','SDSSstdMagColor_corr','SDSSstdMagClean_corr']:
tab[key] = copy(table.field(key))
for i in range(len(table)):
mags_ok = False
star_ok = False
class_star_array = []
include_star = []
in_box = []
name = []
mags_diff_array = []
mags_good_array = []
for ROT in EXPS.keys():
#for y in EXPS[ROT]:
# if table.field(ROT+'$'+y+'$MAG_AUTO')[i] != 0.0:
mags_diff_array += [zps[y] - tab[ROT+'$'+y+'$MAG_AUTO'][i] for y in EXPS[ROT]]
mags_good_array += [tab[ROT+'$'+y+'$MAG_AUTO'][i]!=0.0 for y in EXPS[ROT]]
in_box += [1500 < tab[ROT+'$'+y+'$Xpos_ABS'][i] < 8500 and 1500 < tab[ROT+'$'+y+'$Ypos_ABS'][i] < 6500]
include_star += [((tab[ROT+'$'+y+'$MaxVal'][i] + tab[ROT+'$'+y+'$BackGr'][i]) < 25000 and tab[ROT+'$'+y+'$Flag'][i]==0 and tab[ROT+'$'+y+'$MAG_AUTO'][i] < 40 and tab[ROT+'$'+y+'$MAG_AUTO'][i]!=0.0 and tab[ROT+'$'+y+'$MAGERR_AUTO'][i]<0.05) for y in EXPS[ROT]]
#for y in EXPS[ROT]:
# print (tab[ROT+'$'+y+'$MaxVal'][i] + tab[ROT+'$'+y+'$BackGr'][i]) < 27500 , tab[ROT+'$'+y+'$Flag'][i]==0 , tab[ROT+'$'+y+'$MAG_AUTO'][i] < 40 , tab[ROT+'$'+y+'$MAG_AUTO'][i]!=0.0
name += [{'name':EXPS[ROT][z],'rotation':ROT} for z in range(len(EXPS[ROT]))]
class_star_array += [tab[ROT+'$'+y+'$CLASS_STAR'][i] for y in EXPS[ROT]]
class_star_array.sort()
#if len(mags_array) > 1:
# if 1: #abs(mags_array[0] - mags_array[1]) < 0.5:
# mags_ok = True
# if 1: #abs(class_star_array[-1]) > 0.01: # MAIN PARAMETER!
# star_ok = True
if abs(class_star_array[-1]) > -9: # MAIN PARAMETER!
star_ok = True
if star_ok: #mags_ok and star_ok:
list = []
for k in range(len(mags_good_array)):
if mags_good_array[k]:
list.append(mags_diff_array[k])
if len(list) > 1:
median_mag_diff = scipy.median(list)
#print median_mag_diff, mags_diff_array, class_star_array, include_star
file_list=[]
for j in range(len(include_star)):
if include_star[j] and abs(mags_diff_array[j] - median_mag_diff) < 0.3: # MAIN PARAMETER!
file_list.append(name[j])
if tab['SDSSstdMag_corr'][i] != 0.0: sdss_exists = 1
else: sdss_exists = 0
if 40. > tab['SDSSstdMag_corr'][i] > 0.0: sdss = 1 # and tab['SDSSstdMagClean_corr'][i]==1: sdss = 1
else: sdss = 0
#if 40. > tab['SDSSstdMag_corr'][i] > 0.0: sdss = 1
if len(file_list) > 1:
star_good.append(i)
supas.append({'table index':i,'supa files':file_list, 'sdss':sdss, 'sdss_exists':sdss_exists})
if i%2000==0: print i
return EXPS, star_good, supas
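# Hedged sketch (not part of the pipeline) of the per-star consistency cut
# used above: a star's offsets from each exposure's mean magnitude must agree
# with their median to within 0.3 mag (the 'MAIN PARAMETER') to be kept.
def _sketch_consistency_cut(mag_diffs, tol=0.3):
    import scipy
    med = scipy.median(mag_diffs)
    return [abs(d - med) < tol for d in mag_diffs]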
def diffCalc(SUPA1,FLAT_TYPE):
dict = get_files(SUPA1,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
import astropy, astropy.io.fits as pyfits, sys, os, re, string, copy
print search_params['matched_cat_self']
p = pyfits.open(search_params['matched_cat_self'])
tbdata = p[1].data
mask = tbdata.field('SEx_MaxVal') + tbdata.field('SEx_BackGr') < 27500
newtbdata = tbdata[mask]
print len(newtbdata)
mask = newtbdata.field('CLASS_STAR') > 0.95
newtbdata = newtbdata[mask]
mask = abs(newtbdata.field('SEx_MAG_APER2') - newtbdata.field('MAG_APER2')) < 0.01
new2tbdata = newtbdata[mask]
print len(new2tbdata)
data = new2tbdata.field('SEx_MAG_APER2') - new2tbdata.field('MAG_APER2')
magErr = new2tbdata.field('SEx_MAGERR_APER2')
X = new2tbdata.field('Xpos_ABS')
Y = new2tbdata.field('Ypos_ABS')
file = 'test'
calcDataIllum(file,search_params['LENGTH1'], search_params['LENGTH2'],data,magErr,X,Y)
data_save = []
magErr_save = []
X_save = []
Y_save = []
for i in range(len(data)):
data_save.append([new2tbdata.field('SEx_MAG_APER2')[i],new2tbdata.field('MAG_APER2')[i]])
magErr_save.append([new2tbdata.field('SEx_MAGERR_APER2')[i],new2tbdata.field('MAGERR_APER2')[i]])
X_save.append([new2tbdata.field('Xpos_ABS')[i],new2tbdata.field('SEx_Xpos_ABS')[i]])
Y_save.append([new2tbdata.field('Ypos_ABS')[i],new2tbdata.field('SEx_Ypos_ABS')[i]])
return data_save, magErr_save, X_save, Y_save
def calcDataIllum(file, LENGTH1, LENGTH2, data,magErr, X, Y, pth='/nfs/slac/g/ki/ki04/pkelly/plots/', rot=0):
import numpy, math, pyfits, os
from ppgplot import *
#print size_x, size_y, bin, size_x/bin
x = []
y = []
z = []
zerr = []
from copy import copy
X_sort = copy(X)
Y_sort = copy(Y)
X_sort = numpy.sort(X_sort)
Y_sort = numpy.sort(Y_sort)
X_min = X_sort[0]
Y_min = Y_sort[0]
X_max = X_sort[-1]
Y_max = Y_sort[-1]
X_width = abs(X_max - X_min)
Y_width = abs(Y_max - Y_min)
nbin1 =10
nbin2 =10
print LENGTH1, LENGTH2
bin1 = int(LENGTH1/nbin1)
bin2 = int(LENGTH2/nbin2)
diff_weightsum = -9999*numpy.ones([nbin1,nbin2])
diff_invvar = -9999*numpy.ones([nbin1,nbin2])
X_cen = []
Y_cen = []
data_cen = []
zerr_cen = []
chisq = 0
for i in range(len(data)):
if 1: # LENGTH1*0.3 < X[i] < LENGTH1*0.6:
X_cen.append(X[i])
Y_cen.append(Y[i])
data_cen.append(data[i])
zerr_cen.append(magErr[i])
x.append(X[i])
y.append(Y[i])
z.append(data[i])
zerr.append(magErr[i])
chisq += data[i]**2./magErr[i]**2.
x_val = int((X[i])/float(bin1)) # + size_x/(2*bin)
y_val = int((Y[i])/float(bin2)) #+ size_y/(2*bin)
#print LENGTH1, LENGTH2, x_val, y_val, X[i], Y[i]
#print size_x/bin+1,size_y/bin+1, x_val, y_val, X[i], Y[i]
err = magErr[i]
''' lower limit on error '''
if err < 0.04: err = 0.04
weightsum = data[i]/err**2.
invvar = 1/err**2.
#if 1: #0 <= x_val and x_val < int(nbin1) and y_val >= 0 and y_val < int(nbin2): #0 < x_val < size_x/bin and 0 < y_val < size_y/bin:
#print x_val, y_val
try:
if diff_weightsum[x_val][y_val] == -9999:
diff_weightsum[x_val][y_val] = weightsum
diff_invvar[x_val][y_val] = invvar
#print x_val, y_val, weightsum, '!!!!!'
else:
diff_weightsum[x_val][y_val] += weightsum
diff_invvar[x_val][y_val] += invvar
except: print 'fail'
redchisq = chisq**0.5 / len(data)
print 'redchisq', redchisq
x_p = numpy.array(X_cen)
y_p = numpy.array(Y_cen)
z_p = numpy.array(data_cen)
zerr_p = numpy.array(zerr_cen)
x.sort()
y.sort()
z.sort()
mean = diff_weightsum/diff_invvar
print 'mean'
#print mean
err = 1/diff_invvar**0.5
print 'err'
#print err
print 'writing'
hdu = pyfits.PrimaryHDU(mean)
f = pth + file
os.system('rm ' + f + 'diffmap.fits')
hdu.writeto( f + 'diffmap.fits')
hdu = pyfits.PrimaryHDU(err)
os.system('rm ' + f + 'diffinvar.fits')
hdu.writeto( f + 'diffinvar.fits')
pgbeg(f + 'pos.ps'+"/cps",1,1)
pgiden()
#print x_p
#print z_p
#print zerr_p
#pgswin(x[0],x[-1],z[0],z[-1])
### plot positions
pgpanl(1,1)
pgswin(x[0],x[-1],y[0],y[-1])
pgbox()
pglab('X','Y',file) # label the plot
#pgsci(3)
#pgerrb(6,x_p,z_p,zerr_p)
pgpt(x_p,y_p,3)
pgend()
### plot residuals
pgbeg(f + 'diff.ps'+"/cps",1,2)
pgiden()
#print x_p
#print z_p
#print zerr_p
#pgswin(x[0],x[-1],z[0],z[-1])
pgpanl(1,1)
pgswin(x[0],x[-1],-0.005,0.005)
pgbox()
pglab('X axis','SDSS-SUBARU',file) # label the plot
#pgsci(3)
#pgerrb(6,x_p,z_p,zerr_p)
pgpt(x_p,z_p,3)
#pgswin(y[0],y[-1],z[0],z[-1])
pgpanl(1,2)
pgswin(y[0],y[-1],-0.005,0.005)
pgsci(1)
pgbox()
pglab('Y axis','SDSS-SUBARU',file) # label the plot
#pgsci(3)
#pgerrb(6,y_p,z_p,zerr_p)
pgpt(y_p,z_p,3)
pgsci(1)
#print x_p
#print z_p
#print zerr_p
pgend()
return
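# Hedged sketch (illustration only): each bin of the map above holds an
# inverse-variance weighted mean, sum(d_i/err_i^2)/sum(1/err_i^2), with
# uncertainty 1/sqrt(sum(1/err_i^2)); a minimal one-bin version:
def _sketch_invvar_mean(data, magErr):
    weightsum = sum([d/e**2. for d, e in zip(data, magErr)])
    invvar = sum([1./e**2. for e in magErr])
    return weightsum/invvar, 1./invvar**0.5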
def make_model(ROTS):
#polyterms = [['X','X','X'],['X','X','Y'],['X','Y','Y'],['Y','Y','Y'],['X','X'],['X','Y'],['Y','Y'],['X'],['Y']]
polyterms = [['Xpos_ABS','Xpos_ABS'],['Xpos_ABS','Ypos_ABS'],['Ypos_ABS','Ypos_ABS'],['Xpos_ABS'],['Ypos_ABS']]
''' break up parameters into rotation specific and exposure specific (the zeropoints) '''
model = {'ROT_SPECIFIC':[],'EXP_SPECIFIC':[]}
for ROTATION in ROTS.keys():
for term in polyterms:
name = reduce(lambda x,y: x + 'T' + y,term)
model['ROT_SPECIFIC'].append({'name':ROTATION+'$'+name,'rotation':ROTATION,'term':term,'value':0.1})
for IMAGE in ROTS[ROTATION]:
model['EXP_SPECIFIC'].append({'name':IMAGE+'$zp','image':IMAGE,'term':['zp'],'value':0.01})
fit = {'model':model,'fixed':[],'apply':[]}
print fit
return fit
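# Hedged usage sketch, assuming a toy EXPS layout; shows how make_model
# splits parameters into per-rotation polynomial terms and one zeropoint
# per exposure (the SUPA names below are placeholders):
def _sketch_make_model():
    EXPS = {'0': ['SUPA0000001'], '1': ['SUPA0000002']}
    fit = make_model(EXPS)
    print fit['model']['EXP_SPECIFIC'][0]['name']  # e.g. SUPA0000001$zp
    print fit['model']['ROT_SPECIFIC'][0]['name']  # e.g. 0$Xpos_ABSTXpos_ABS (dict order may vary)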
def calc_model(p, smodel, valdict, y, err):
    ''' standalone leftover mirroring phot_funct.calc_model: evaluate a linear
    model whose terms are products of named quantities from valdict, then
    return normalized residuals (parameterized here so it runs without a
    phot_funct instance) '''
    model = 0
    for i in range(len(smodel)):
        term = smodel[i]
        model += p[i] * reduce(lambda x,y: x * y,[valdict[z] for z in term])
    status = 0
    return([status, (model-y)/err])
class phot_funct:
def __init__(self,inputmodel,sfixed,EXPS,star_good,sapply=[],zps=0):
''' need to take EXPS and make a vector of parameters to pass to the fitting program as well as a dictionary '''
self.star_good = star_good
self.inputmodel = inputmodel
self.allterms = self.inputmodel['ROT_SPECIFIC'] + self.inputmodel['EXP_SPECIFIC']
self.parstart = [{'value':x['value'],'fixed':0.001} for x in self.allterms] # assign initial values to all parameters
self.pardict = {}
for x in range(len(self.allterms)):
self.pardict[self.allterms[x]['name']] = x
#self.pardict = [{self.allterms[x]['name']:x} for x in range(len(self.allterms))] # dictionary of parameter indicies for parameter names
self.model = [x['term'] for x in self.allterms] # make a list of the form of each term
print 'HERE'
print self.allterms
self.EXPS = EXPS
#self.p_dict = []
#self.smodeldict = {}
#for x in self.sinputmodel:
# self.smodeldict[x['name']] = x['term']
self.sfixed = sfixed
self.sapply = sapply
self.fitvars = {}
#fa = {"y": data, "err": err, 'X':X, 'Y':Y, 'maxVal':maxVal, 'classStar':classStar}
def calc_model(self, p, fjac=None, table=None):
# function you can pass to mpfit
self.dict = {'zp':1, 'table':table}
#print p
redchisqs = []
rows = len(table)
print rows
row_num = 0
for j in self.star_good:
row_num += 1
data = []
errs = []
models = []
numerators = []
denominators = []
for ROT in self.EXPS:
good_exps = []
for exp in self.EXPS[ROT]:
#print exp
if table.field(ROT+'$'+exp+'$MaxVal')[j] + table.field(ROT+'$'+exp+'$BackGr')[j] < 27500 and table.field(ROT+'$'+exp+'$CLASS_STAR')[j] > 0.9:
good_exps.append(exp)
#print good_exps, self.EXPS[ROT]
#print good_stars, X[j], Y[j], y[j], maxVal[j], classStar[j]
if len(good_exps) > 0:
tot = len(good_exps)
import scipy
#models = scipy.zeros(tot)
#numerators = scipy.zeros(tot)
#denominators = scipy.zeros(tot)
for exp in good_exps:
#print self.allterms
model_zp_terms = []
model_position_terms = []
for term in self.allterms:
if term.has_key('image'):
if term['image'] == exp:
model_zp_terms.append(term)
if term.has_key('rotation'):
#print term['rotation'], ROT, str(term['rotation']) == str(ROT)
if str(term['rotation']) == str(ROT):
model_position_terms.append(term)
#print model_zp_terms, model_position_terms
model = 0
''' add positionally depdendent terms '''
for term in model_position_terms:
#print table.field(ROT+'$'+exp+'$'+term['term'][0])[j]
#print self.pardict[term['name']]
model += p[self.pardict[term['name']]] * reduce(lambda x,y: x * y,[table.field(ROT+'$'+exp+'$'+z)[j] for z in term['term']])
''' add the zeropoint for that image '''
for term in model_zp_terms:
#print self.pardict[term['name']]
model += p[self.pardict[term['name']]]
data.append(table.field(ROT+'$'+exp+'$MAG_APER2')[j]**2.)
errs.append(table.field(ROT+'$'+exp+'$MAGERR_APER2')[j]**2.)
models.append(model)
numerators.append((model-table.field(ROT+'$'+exp+'$MAG_APER2')[j])/table.field(ROT+'$'+exp+'$MAGERR_APER2')[j]**2.)
denominators.append(1./table.field(ROT+'$'+exp+'$MAGERR_APER2')[j]**2.)
if len(data)>0:
''' we have already subtracted the image-dependent zeropoint so we just need to subtract the instrinsic magnitude of the star, which we get from an average '''
average = reduce(lambda x,y: x + y,numerators) / reduce(lambda x,y: x + y, denominators)
#print average
chisq = 0
for k in range(len(data)):
chisq += abs(models[k] - data[k] - average) / errs[k]
#print chisq
#print models[k], y[j][k], average, err[j][k]
redchisq = chisq/float(len(data))
#ydiff = y[j]['0'][0] - y[j]['0'][1]
#moddiff = models[0] - models[1]
if 0: #abs(moddiff - ydiff) < 0.001:
print X[j]
print Y[j]
print y[j]
print err[j]
print models
print 'moddiff', models[0] - models[1]
print 'y diff', y[j][0] - y[j][1]
print chisq
print redchisq
redchisqs.append(redchisq)
#redchisqs.append(abs(moddiff-ydiff)/err[j][0])
if row_num%500 == 0: print j
status = 0
import numpy
redchisqs = numpy.array(redchisqs)
#print redchisqs
return([status,redchisqs ])
def calc_sigma(self, p, fjac=None, y=None, err=None, X=None, Y=None):
# function you can pass to mpfit
# NOTE: vestigial copy of the photo_abs_new version; color1/color2/airmass
# are not defined in this class, so only the zp/X/Y entries are usable here
self.dict = {'zp':1., 'X':X, 'Y':Y}
model = 0
for i in range(len(self.model)):
term = self.model[i]
#print term
model += p[i] * reduce(lambda x,y: x * y,[self.dict[z] for z in term])
status = 0
return([model, (model-y)/err])
def calcIllum(size_x, size_y, bin, fit):
import numpy, math, pyfits, os
fitvars = fit['class'].fitvars
x,y = numpy.meshgrid(numpy.arange(0,size_x,bin),numpy.arange(0,size_y,bin))
F=0.1
print 'calculating'
#epsilon = fitvars['X']*x + fitvars['Y']*y + fitvars['XTX']*x**2 + fitvars['YTY']*y**2 + fitvars['XTY']*x*y + fitvars['XTYTY']*x*y*y + fitvars['XTXTY']*x*x*y + fitvars['XTXTX']*x*x*x + fitvars['YTYTY']*y*y*y
epsilon = fitvars['X']*x + fitvars['Y']*y + fitvars['XTX']*x**2 + fitvars['YTY']*y**2 + fitvars['XTY']*x*y
#correction = 10.**(epsilon/2.5)
print 'writing'
hdu = pyfits.PrimaryHDU(epsilon)
os.system('rm /tmp/correction.fits')
hdu.writeto('/tmp/correction.fits')
print 'done'
return
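# Hedged note: epsilon above is a magnitude offset map; the multiplicative
# flat-field correction (commented out above) would be 10.**(epsilon/2.5).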
def random_cmp(x,y):
import random
a = random.random()
b = random.random()
if a > b: return 1
else: return -1
def starStats(supas):
dict = {}
dict['rot'] = 0
dict['sdss'] = 0
dict['sdss_exists'] = 0
for s in supas:
if s['sdss']: dict['sdss'] += 1
if s['sdss_exists']: dict['sdss_exists'] += 1
s = s['supa files']
rot1 = 0
rot0 = 0
for ele in s:
if not dict.has_key(ele['name']):
dict[ele['name']] = 0
dict[ele['name']] += 1
if ele['rotation'] == '1':rot1 = 1
if ele['rotation'] == '0':rot0 = 1
if rot0 and rot1:
dict['rot'] += 1
#print dict['rot'], 'rot'
for key in dict.keys():
print key, dict[key]
def add_single_correction(x,y,fitvars):
cheby_x = [{'n':'0x','f':lambda x,y:1.},{'n':'1x','f':lambda x,y:x},{'n':'2x','f':lambda x,y:2*x**2-1},{'n':'3x','f':lambda x,y:4*x**3.-3*x}]
cheby_y = [{'n':'0y','f':lambda x,y:1.},{'n':'1y','f':lambda x,y:y},{'n':'2y','f':lambda x,y:2*y**2-1},{'n':'3y','f':lambda x,y:4*y**3.-3*y}]
cheby_terms = []
for tx in cheby_x:
for ty in cheby_y:
if fitvars.has_key(tx['n']+ty['n']): # not ((tx['n'] == '0x' and ty['n'] == '0y')): # or (tx['n'] == '0x' and ty['n'] == '1y') or (tx['n'] == '1x' and ty['n'] == '0y')) :
cheby_terms.append({'n':tx['n'] + ty['n'],'fx':tx['f'],'fy':ty['f']})
epsilon = 0
for term in cheby_terms:
epsilon += fitvars[term['n']]*term['fx'](x,y)*term['fy'](x,y)
return epsilon
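# Hedged sketch (illustration only): evaluating the Chebyshev correction on
# coordinates normalized to [-1,1], using the same convention as the
# coord_conv_x/coord_conv_y lambdas in linear_fit(); sizes and coefficients
# below are toy values, not fitted ones.
def _sketch_cheby_correction():
    import numpy
    LENGTH1, LENGTH2 = 10000., 8000.  # assumed mosaic size in pixels
    xpix = numpy.array([0., 5000., 10000.])
    ypix = numpy.array([0., 4000., 8000.])
    x = (2.*xpix - LENGTH1)/LENGTH1   # -> [-1, 0, 1]
    y = (2.*ypix - LENGTH2)/LENGTH2
    fitvars = {'1x0y': 0.01, '0x1y': -0.02}  # pure tilt terms in x and y
    print add_single_correction(x, y, fitvars)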
def illum():
import MySQLdb, sys, os, re, time, utilities, pyfits
from copy import copy
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
keystop = ['PPRUN','ROTATION','OBJNAME']
list = reduce(lambda x,y: x + ',' + y, keystop)
command="SELECT * from fit_db where FILTER='W-J-V' and CLUSTER='MACS1720+35'"
print command
c.execute(command)
results=c.fetchall()
db_keys = describe_db(c,'fit_db')
for line in results:
if 1:
dtop = {}
for i in range(len(db_keys)):
dtop[db_keys[i]] = str(line[i])
#print dtop.keys()
if 0:
for rot in ['0','1']:
supas = re.split('\,',dtop[rot + 'supas'])
import string
if string.find(dtop[rot+'supas'],'None') != -1:
print supas, supas[0]!='None'
if supas[0] != 'None':
crval1std, crval2std = calcDither(supas)
print crval1std, crval2std
save_fit({'PPRUN':dtop['PPRUN'],'FILTER':dtop['FILTER'],'CLUSTER':dtop['CLUSTER'],'dither$' + rot + '$RA':str(crval1std),'dither$' + rot + '$DEC':str(crval2std)})
if 1:
for rot in ['0','1']:
for sample in ['nosdss','sdss']:
print dtop['stat' + rot + sample], dtop['reducedchi$' + sample + '$all'], dtop['CLUSTER'], dtop['FILTER'], rot, sample
os.system('xpaset -p ds9 file ' + dtop[sample + '$all$' + rot + '$im'])
os.system('xpaset -p ds9 contour yes')
print 'plotted'
#os.system('xpaset ds9 contour')
#try:
# stat, blah, im = compSurfaceDiff([dtop[sample + '$rand' + str(num) + '$' + rot + '$im'] for num in [1,2,3,4]])
# save_fit({'FILTER':dtop['FILTER'],'CLUSTER':dtop['CLUSTER'],'stat' + rot + sample:stat})
#except:
# print 'fail'
def calcDither(supas):
crval1s = []
crval2s = []
print supas
for supa in supas:
dt = get_files(supa)
print supa
if dt['CRVAL1'] is None:
length(supa, dt['FLAT_TYPE'])
dt = get_files(supa)
print supa
print dt['CRVAL1'], dt['CRVAL2']
crval1s.append(float(dt['CRVAL1']))
crval2s.append(float(dt['CRVAL2']))
import numpy
print crval1s, crval2s, numpy.std(crval1s)*3600, numpy.std(crval2s)*3600
return numpy.std(crval1s)*3600, numpy.std(crval2s)*3600
def compSurfaceDiff(images, xpix=None, ypix=None):
import numpy, scipy
import sys, pyfits
surfs = [pyfits.getdata(surfname) for surfname in images]
refsurf = surfs[0]
print refsurf.shape
if xpix is None:
xlow,xhigh = 0,refsurf.shape[1]
else: xlow,xhigh = xpix
if ypix is None:
ylow,yhigh = 0,refsurf.shape[0]
else: ylow,yhigh = ypix
#print numpy.array([scipy.median(surf[ylow:yhigh,xlow:xhigh].flatten()) for surf in surfs] )
normsurfs = numpy.array([surf-scipy.median(surf[ylow:yhigh,xlow:xhigh].flatten()) \
for surf in surfs])
stddiff_surf = numpy.std(normsurfs, axis=0)
    stddiff_median = numpy.median(stddiff_surf[ylow:yhigh,xlow:xhigh].flatten())
    stddiff_mean = numpy.mean(stddiff_surf[ylow:yhigh,xlow:xhigh].flatten())
return stddiff_median, stddiff_mean, stddiff_surf
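# Minimal NumPy-only sketch of the statistic computed by compSurfaceDiff(), using
# synthetic arrays in place of FITS images: median-subtract each surface, take the
# pixel-wise standard deviation across surfaces, then summarize that map.
def _example_surface_diff():
    import numpy
    surfs = [numpy.zeros((4,4)), numpy.ones((4,4))]  # toy illumination maps
    norm = numpy.array([s - numpy.median(s) for s in surfs])
    std_map = numpy.std(norm, axis=0)  # all zeros here: both maps are flat
    return numpy.median(std_map), numpy.mean(std_map)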
def linear_fit(CLUSTER,FILTER,PPRUN):
print CLUSTER,FILTER, PPRUN
SDSS=True
maxSigIter=50
solutions = []
fit_db = {}
import pickle
''' get data '''
EXPS = getTableInfo()
for ROT in EXPS.keys():
print EXPS[ROT]
save_fit({'PPRUN':PPRUN,'FILTER':FILTER,'CLUSTER':CLUSTER,str(ROT)+'images':len(EXPS[ROT]),str(ROT)+'supas':reduce(lambda x,y:x+','+y,EXPS[ROT])})
print EXPS
''' create chebychev polynomials '''
cheby_x = [{'n':'0x','f':lambda x,y:1.},{'n':'1x','f':lambda x,y:x},{'n':'2x','f':lambda x,y:2*x**2-1},{'n':'3x','f':lambda x,y:4*x**3.-3*x}]
cheby_y = [{'n':'0y','f':lambda x,y:1.},{'n':'1y','f':lambda x,y:y},{'n':'2y','f':lambda x,y:2*y**2-1},{'n':'3y','f':lambda x,y:4*y**3.-3*y}]
cheby_terms = []
cheby_terms_no_linear = []
for tx in cheby_x:
for ty in cheby_y:
if not ((tx['n'] == '0x' and ty['n'] == '0y')): # or (tx['n'] == '0x' and ty['n'] == '1y') or (tx['n'] == '1x' and ty['n'] == '0y')) :
cheby_terms.append({'n':tx['n'] + ty['n'],'fx':tx['f'],'fy':ty['f']})
if not ((tx['n'] == '0x' and ty['n'] == '0y') or (tx['n'] == '0x' and ty['n'] == '1y') or (tx['n'] == '1x' and ty['n'] == '0y')) :
cheby_terms_no_linear.append({'n':tx['n'] + ty['n'],'fx':tx['f'],'fy':ty['f']})
#ROTS, data, err, X, Y, maxVal, classStar = diffCalcNew()
#save = {'ROTS': ROTS, 'data':data,'err':err,'X':X,'Y':Y,'maxVal':maxVal,'classStar':classStar}
#uu = open('/tmp/store','w')
#import pickle
#pickle.dump(save,uu)
#uu.close()
''' EXPS has all of the image information for different rotations '''
''' make model '''
    #fit = make_model(EXPS)
    #position_fit = make_position_model(EXPS)
    #print fit  # make_model() is disabled above, so 'fit' is undefined at this point
''' see if in sdss, linear or not '''
dt = get_files(EXPS[EXPS.keys()[0]][0])
LENGTH1, LENGTH2 = dt['LENGTH1'], dt['LENGTH2']
print LENGTH1, LENGTH2
cov = sdss_coverage(dt['SUPA'],dt['FLAT_TYPE'])
save_fit({'PPRUN':PPRUN,'FILTER':FILTER,'CLUSTER':CLUSTER,'sdss_coverage':str(cov)})
if 1:
EXPS, star_good,supas = selectGoodStars(EXPS)
uu = open('/tmp/selectGoodStars','w')
import pickle
pickle.dump({'EXPS':EXPS,'star_good':star_good,'supas':supas},uu)
uu.close()
import pickle
f=open('/tmp/selectGoodStars','r')
m=pickle.Unpickler(f)
d=m.load()
EXPS = d['EXPS']
star_good = d['star_good']
supas = d['supas']
starStats(supas)
print len(star_good)
#cheby_terms_use = cheby_terms_no_linear
fitvars_fiducial = False
if cov:
samples = [['sdss',cheby_terms]] #[['nosdss',cheby_terms_no_linear],['sdss',cheby_terms]]
else:
samples = [['nosdss',cheby_terms_no_linear]]
for sample,cheby_terms_use in samples:
import scipy
import astropy, astropy.io.fits as pyfits
p = pyfits.open('/tmp/final.cat')
table = p[1].data
from copy import copy
tab = {}
for ROT in EXPS.keys():
for y in EXPS[ROT]:
for key in [ROT+'$'+y+'$MAG_APER2',ROT+'$'+y+'$MAG_AUTO',ROT+'$'+y+'$MaxVal',ROT+'$'+y+'$BackGr',ROT+'$'+y+'$CLASS_STAR',ROT+'$'+y+'$Flag',ROT+'$'+y+'$MAGERR_AUTO',ROT+'$'+y+'$Xpos_ABS',ROT+'$'+y+'$Ypos_ABS','SDSSstdMagErr_corr','SDSSstdMag_corr','SDSSstdMagColor_corr']:
tab[key] = copy(table.field(key))
coord_conv_x = lambda x:(2.*x-0-LENGTH1)/(LENGTH1-0)
coord_conv_y = lambda x:(2.*x-0-LENGTH2)/(LENGTH2-0)
save_fit({'FILTER':FILTER,'CLUSTER':CLUSTER,'PPRUN':PPRUN,'supas':len(supas),'sdss_stars':len(filter(lambda x:x['sdss'],supas))})
supas_copy = copy(supas)
for sample_size in ['all']: #'rand1','rand2','rand3','rand4']: #,'rand3']:
''' take a random sample of half '''
if sample_size != 'all':
## changing the CLASS_STAR criterion upwards helps as does increasing the sigma on the SDSS stars
print len(supas)
l = range(len(supas_copy))
print l[0:10]
l.sort(random_cmp)
print l[0:10]
''' shorten star_good, supas '''
supas = [supas_copy[i] for i in l[0:len(supas_copy)/2]]
else:
supas = copy(supas_copy)
print len(supas), 'supas', supas[0]
columns = []
column_dict = {}
''' position-dependent terms in design matrix '''
position_columns = []
index = -1
for ROT in EXPS.keys():
for term in cheby_terms_use:
index += 1
name = str(ROT) + '$' + term['n'] # + reduce(lambda x,y: x + 'T' + y,term)
position_columns.append({'name':name,'fx':term['fx'],'fy':term['fy'],'rotation':ROT,'index':index})
#print position_columns
columns.append(position_columns)
''' zero point terms in design matrix '''
zp_columns = []
for ROT in EXPS.keys():
for exp in EXPS[ROT]:
zp_columns.append({'name':'zp_'+exp,'image':exp,'im_rotation':ROT})
if SDSS:
zp_columns.append({'name':'zp_SDSS','image':'sdss'})
columns.append(zp_columns)
color_columns=[{'name':'SDSS_color'}]
columns.append(color_columns)
mag_columns = []
for star in supas:
mag_columns.append({'name':'mag_' + str(star['table index'])})
columns.append(mag_columns)
column_names = [x['name'] for x in reduce(lambda x,y: x+y,columns)]
print column_names[0:100]
''' total number of fit parameters summed over each rotation + total number of images of all rotations + total number of stars to fit '''
x_length = len(position_columns) + len(zp_columns) + len(color_columns) + len(mag_columns)
y_length = reduce(lambda x,y: x + y,[len(star['supa files'])*2 for star in supas]) # double number of rows for SDSS
print x_length, y_length
Bstr = ''
row_num = -1
supa_num = -1
''' each star '''
print 'creating matrix....'
sigmas = []
inst = []
data = {}
magErr = {}
whichimage = {}
X = {}
Y = {}
color = {}
for ROT in EXPS.keys():
data[ROT] = []
magErr[ROT] = []
X[ROT] = []
Y[ROT] = []
color[ROT] = []
whichimage[ROT] = []
for star in supas:
supa_num += 1
''' each exp of each star '''
if 1:
star_A = []
star_B = []
sigmas = []
for exp in star['supa files']:
row_num += 1
col_num = -1
rotation = exp['rotation']
sigma = tab[str(rotation) + '$' + exp['name'] + '$MAGERR_AUTO'][star['table index']]
if sigma < 0.001: sigma = 0.001
sigma = sigma # * 1000.
#sigma = 1
for c in position_columns:
col_num += 1
if c['rotation'] == rotation:
n = str(rotation) + '$' + exp['name'] + '$Xpos_ABS'
x = tab[str(rotation) + '$' + exp['name'] + '$Xpos_ABS'][star['table index']]
y = tab[str(rotation) + '$' + exp['name'] + '$Ypos_ABS'][star['table index']]
x = coord_conv_x(x)
y = coord_conv_y(y)
value = c['fx'](x,y)*c['fy'](x,y)/sigma
star_A.append([row_num,col_num,value])
first_column = True
for c in zp_columns:
col_num += 1
#if not degeneracy_break[c['im_rotation']] and c['image'] == exp['name']:
if (first_column is not True and c['image'] == exp['name']):
value = 1./sigma
star_A.append([row_num,col_num,value])
first_column = False
''' fit for the color term dependence for SDSS comparison '''
col_num += 1
''' magnitude column -- include the correct/common magnitude '''
col_num += 1
value = 1./sigma
star_A.append([row_num,col_num+supa_num,value])
value = tab[str(rotation) + '$' + exp['name'] + '$MAG_AUTO'][star['table index']]/sigma
x = tab[str(rotation) + '$' + exp['name'] + '$Xpos_ABS'][star['table index']]
y = tab[str(rotation) + '$' + exp['name'] + '$Ypos_ABS'][star['table index']]
x = coord_conv_x(x)
y = coord_conv_y(y)
if fitvars_fiducial:
value += add_single_correction(x,y,fitvars_fiducial)
star_B.append([row_num,value])
sigmas.append(sigma)
inst.append({'type':'sdss','A_array':star_A, 'B_array':star_B, 'sigma_array': sigmas})
if star['sdss'] and sample=='sdss':
star_A = []
star_B = []
sigmas = []
for exp in star['supa files']:
row_num += 1
col_num = -1
rotation = exp['rotation']
#sigma = tab[str(rotation) + '$' + exp['name'] + '$MAGERR_AUTO'][star['table index']]
sigma = tab['SDSSstdMagErr_corr'][star['table index']]
for c in position_columns:
col_num += 1
first_column = True
for c in zp_columns:
col_num += 1
''' remember that the good magnitude does not have any zp dependence!!! '''
#if (first_column is not True and c['image'] == exp['name']) or c['image'] == 'sdss':
if c['image'] == 'sdss':
value = 1./sigma
star_A.append([row_num,col_num,value])
first_column = False
''' fit for the color term dependence '''
for c in color_columns:
col_num += 1
value = tab['SDSSstdMagColor_corr'][star['table index']]/sigma
star_A.append([row_num,col_num,value])
col_num += 1
''' magnitude column -- include the correct/common magnitude '''
value = 1./sigma
star_A.append([row_num,col_num+supa_num,value])
#value = (tab[str(rotation)+'$'+exp['name']+'$MAG_AUTO'][star['table index']] - tab['SDSSstdMag_corr'][star['table index']])/sigma
#print tab[str(rotation)+'$'+exp['name']+'$MAG_AUTO'][star['table index']], tab['SDSSstdMag_corr'][star['table index']]
if 1:
data[rotation].append(tab[str(rotation)+'$'+exp['name']+'$MAG_AUTO'][star['table index']] - tab['SDSSstdMag_corr'][star['table index']])
magErr[rotation].append(tab['SDSSstdMagErr_corr'][star['table index']])
whichimage[rotation].append(exp['name'])
X[rotation].append(tab[str(rotation) + '$' + exp['name'] + '$Xpos_ABS'][star['table index']])
Y[rotation].append(tab[str(rotation) + '$' + exp['name'] + '$Ypos_ABS'][star['table index']])
color[rotation].append(tab['SDSSstdMagColor_corr'][star['table index']])
value = tab['SDSSstdMag_corr'][star['table index']]/sigma
star_B.append([row_num,value])
sigmas.append(sigma)
#print star_A, star_B, sigmas
inst.append({'type':'sdss','A_array':star_A, 'B_array':star_B, 'sigma_array': sigmas})
''' save the SDSS matches '''
sdss_matches = {'data':data,'magErr':magErr,'whichimage':whichimage,'X':X,'Y':Y,'color':color}
uu = open('/tmp/sdss','w')
import pickle
pickle.dump(sdss_matches,uu)
uu.close()
''' do fitting '''
if 1:
''' make matrices/vectors '''
Ainst_expand = []
for z in inst:
for y in z['A_array']:
Ainst_expand.append(y)
Binst_expand = []
for z in inst:
for y in z['B_array']:
Binst_expand.append(y)
print len(Binst_expand)
sigmas = []
for z in inst:
for y in z['sigma_array']:
sigmas.append(y)
print len(Binst_expand)
ylength = len(Binst_expand)
print y_length, x_length
print len(Ainst_expand), len(Binst_expand)
print 'lengths'
A = scipy.zeros([y_length,x_length])
B = scipy.zeros(y_length)
Af = open('A','w')
Bf = open('b','w')
for ele in Ainst_expand:
Af.write(str(ele[0]) + ' ' + str(ele[1]) + ' ' + str(ele[2]) + '\n')
#print ele, y_length, x_length
#print ele
A[ele[0],ele[1]] = ele[2]
for ele in Binst_expand:
B[ele[0]] = ele[1]
Bstr = reduce(lambda x,y:x+' '+y,[str(z[1]) for z in Binst_expand])
Bf.write(Bstr)
Bf.close()
Af.close()
print 'finished matrix....'
print len(position_columns), len(zp_columns)
print A[0,0:30], B[0:10], scipy.shape(A), scipy.shape(B)
print A[1,0:30], B[0:10], scipy.shape(A), scipy.shape(B)
print 'hi!'
Af = open('/tmp/B','w')
for i in range(len(B)):
Af.write(str(B[i]) + '\n')
Af.close()
print 'solving matrix...'
import re, os
os.system('rm x')
os.system('sparse < A')
bout = open('x','r').read()
res = re.split('\s+',bout[:-1].replace('nan','0'))
U = [float(x) for x in res][:x_length]
params = {}
for i in range(len(U)):
params[column_names[i]] = U[i]
print 'finished solving...'
#from scipy import linalg
#print 'doing linear algebra'
#U = linalg.lstsq(A,B)
#print U[0][0:30]
                ''' calculate the mean absolute residual (stored as the reduced chi-squared) '''
print scipy.shape(A), len(U), x_length, len(res)
Bprime = scipy.dot(A,U)
print scipy.shape(Bprime),scipy.shape(B)
                Bdiff = abs(B-Bprime).sum()/len(B)
print (B-Bprime)[:300]
print U[0:20]
                #print x[0:20]  # x is a scalar here; slicing it would raise a TypeError
                print Bdiff, 'mean absolute residual'
save_fit({'PPRUN':PPRUN,'FILTER':FILTER,'CLUSTER':CLUSTER,'reducedchi$'+sample+'$'+sample_size:Bdiff})
data_directory = '/nfs/slac/g/ki/ki04/pkelly/illumination/'
position_fit = [['Xpos_ABS','Xpos_ABS'],['Xpos_ABS','Ypos_ABS'],['Ypos_ABS','Ypos_ABS'],['Xpos_ABS'],['Ypos_ABS']]
import re
''' save fit information '''
print sample+'$'+sample_size+'$' + str(ROT) + '$positioncolumns',reduce(lambda x,y: x+','+y,[z['name'] for z in position_columns])
save_fit({'PPRUN':PPRUN,'FILTER':FILTER,'CLUSTER':CLUSTER,sample+'$'+sample_size+'$positioncolumns':reduce(lambda x,y: x+','+y,[z['name'] for z in position_columns])})
dtmp = {}
for ROT in EXPS.keys():
print 'ROT', ROT
fitvars = {}
for ele in position_columns:
                    res = re.split(r'\$',ele['name'])
fitvars[ele['name']] = U[ele['index']]
dtmp[sample+'$'+sample_size+'$'+ele['name']]=fitvars[ele['name']]
print ele['name'], fitvars[ele['name']]
dtmp.update({'PPRUN':PPRUN,'FILTER':FILTER,'CLUSTER':CLUSTER})
save_fit(dtmp)
''' make diagnostic plots '''
if 1:
import re
d = get_fits(CLUSTER,FILTER,PPRUN)
column_prefix = sample+'$'+sample_size+'$'
position_columns_names = re.split('\,',d[column_prefix + 'positioncolumns'])
fitvars = {}
cheby_terms_use = []
for ele in position_columns:
                    res = re.split(r'\$',ele['name'])
fitvars[ele['name']] = float(d[sample+'$'+sample_size+'$'+ele['name']])
for term in cheby_terms:
if term['n'] == ele['name'][2:]:
cheby_terms_use.append(term)
print cheby_terms_use, fitvars
''' make images of illumination corrections '''
for ROT in EXPS.keys():
size_x=8000
size_y=10000
bin=100
import numpy, math, pyfits, os
x,y = numpy.meshgrid(numpy.arange(0,size_x,bin),numpy.arange(0,size_y,bin))
F=0.1
print 'calculating'
x = coord_conv_x(x)
y = coord_conv_y(y)
epsilon = 0
for term in cheby_terms_use:
#print 'fitvar',fitvars[str(ROT)+'$'+term['n']],'fx',term['fx'](x,y),'fy',term['fy'](x,y)
#print fitvars[str(ROT)+'$'+term['n']]*term['fx'](x,y)
#print term['fx'](x,y)*term['fy'](x,y)
epsilon += fitvars[str(ROT)+'$'+term['n']]*term['fx'](x,y)*term['fy'](x,y)
print 'writing'
hdu = pyfits.PrimaryHDU(epsilon)
#os.system('rm /tmp/correction' + ROT + filter + sample_size + '.fits')
#hdu.writeto('/tmp/correction' + ROT + filter + sample_size + '.fits')
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':CLUSTER}
illum_dir = path + 'PHOTOMETRY/ILLUMINATION/' + FILTER + '/' + str(ROT)
os.system('mkdir -p ' + illum_dir)
im = illum_dir + '/correction' + sample + sample_size + '.fits'
save_fit({'PPRUN':PPRUN,'FILTER':FILTER,'CLUSTER':CLUSTER,sample+'$'+sample_size+'$'+str(ROT)+'$im':im})
os.system('rm ' + im)
hdu.writeto(im)
print 'done'
if 0:
''' calculate SDSS plot differences, before and after '''
for ROT in EXPS.keys():
for star in supas:
print star
if star['sdss']:
data[ROT] = scipy.array(data[ROT])
color[ROT] = scipy.array(color[ROT])
''' apply the color term measured from the data '''
zp_correction = scipy.array([float(params['zp_'+x]) for x in whichimage[ROT]])
data1 = data[ROT] - params['SDSS_color']*color[ROT] - zp_correction
data2 = data1 - (data1/data1*scipy.median(data1))
plot_color(color[ROT], data2)
print X[ROT]
x = coord_conv_x(X[ROT])
y = coord_conv_y(Y[ROT])
epsilon = 0
for term in cheby_terms:
                                epsilon += fitvars[str(ROT)+'$'+term['n']]*term['fx'](x,y)*term['fy'](x,y)
#print whichimage[ROT][0:100]
#data1 = data[ROT] - zp_correction
#data2 = data1 - (data1/data1*scipy.median(data1))
#plot_color(color[ROT], data2)
print magErr[ROT][0:20]
calcDataIllum('rot'+str(ROT)+FILTER,10000,8000,data[ROT],magErr[ROT],X[ROT],Y[ROT],pth='/tmp/',rot=0)
print 'calcDataIllum', len(data[ROT])
raw_input()
return
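# Hedged alternative to the external 'sparse < A' solver invoked above: the same
# (row, col, value) triplets and (row, value) pairs can be solved in-process with
# scipy.sparse.linalg.lsqr. A sketch, not a drop-in replacement for the pipeline.
def solve_triplets_with_scipy(A_triplets, B_pairs, x_length, y_length):
    import numpy
    from scipy.sparse import coo_matrix
    from scipy.sparse.linalg import lsqr
    rows = [t[0] for t in A_triplets]
    cols = [t[1] for t in A_triplets]
    vals = [t[2] for t in A_triplets]
    A = coo_matrix((vals, (rows, cols)), shape=(y_length, x_length)).tocsr()
    B = numpy.zeros(y_length)
    for r, v in B_pairs:
        B[r] = v
    return lsqr(A, B)[0]  # least-squares solution vector, analogous to U above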
def residual_plots():
for ROT in EXPS.keys():
print 'ROT', ROT
fitvars = {}
for ele in position_columns:
            res = re.split(r'\$',ele['name'])
if res[0] == ROT:
fitvars[ele['name'][2:]] = U[ele['index']]
                save_fit({'PPRUN':PPRUN,'FILTER':FILTER,'CLUSTER':CLUSTER,sample+'$'+sample_size+'$'+ele['name']:fitvars[ele['name'][2:]]})
print ele['name'], fitvars[ele['name'][2:]]
if 0:
uu = open('/tmp/fitvars' + ROT,'w')
import pickle
pickle.dump(fitvars,uu)
uu.close()
size_x=8000
size_y=10000
bin=100
import numpy, math, pyfits, os
x,y = numpy.meshgrid(numpy.arange(0,size_x,bin),numpy.arange(0,size_y,bin))
F=0.1
print 'calculating'
x = coord_conv_x(x)
y = coord_conv_y(y)
epsilon = 0
for term in cheby_terms_use:
epsilon += fitvars[term['n']]*term['fx'](x,y)*term['fy'](x,y)
print 'writing'
hdu = pyfits.PrimaryHDU(epsilon)
#os.system('rm /tmp/correction' + ROT + filter + sample_size + '.fits')
#hdu.writeto('/tmp/correction' + ROT + filter + sample_size + '.fits')
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':CLUSTER}
illum_dir = path + 'PHOTOMETRY/ILLUMINATION/' + FILTER + '/' + str(ROT)
os.system('mkdir -p ' + illum_dir)
im = illum_dir + '/correction' + sample + sample_size + '.fits'
save_fit({'PPRUN':PPRUN,'FILTER':FILTER,'CLUSTER':CLUSTER,sample+'$'+sample_size+'$'+str(ROT)+'$im':im})
os.system('rm ' + im)
hdu.writeto(im)
print 'done'
epsilon = 10.**(epsilon/2.5)
#correction = 10.**(epsilon/2.5)
# xaxis is always vertical!!!
#print 'writing'
#hdu = pyfits.PrimaryHDU(epsilon)
#os.system('rm /tmp/fcorrection' + ROT + filter + '.fits')
#hdu.writeto('/tmp/fcorrection' + ROT + filter + '.fits')
print 'done'
return
def fit():
maxSigIter=50
solutions = []
import pickle
''' get data '''
EXPS = getTableInfo()
print EXPS
#ROTS, data, err, X, Y, maxVal, classStar = diffCalcNew()
#save = {'ROTS': ROTS, 'data':data,'err':err,'X':X,'Y':Y,'maxVal':maxVal,'classStar':classStar}
#uu = open('/tmp/store','w')
#import pickle
#pickle.dump(save,uu)
#uu.close()
''' EXPS has all of the image information for different rotations '''
''' make model '''
fit = make_model(EXPS)
print fit
star_good = selectGoodStars(EXPS)
uu = open('/tmp/store','w')
import pickle
pickle.dump(star_good,uu)
uu.close()
import pickle
f=open('/tmp/store','r')
m=pickle.Unpickler(f)
star_good=m.load()
fit['class'] = phot_funct(fit['model'],fit['fixed'],EXPS,star_good,fit['apply'])
import astropy, astropy.io.fits as pyfits
p = pyfits.open('/tmp/final.cat')
table = p[1].data
import copy
table_save = copy.copy(table)
for i in range(maxSigIter):
fa = {"table": table_save}
func = fit['class'].calc_model
#functkw takes input data arrays
#parinfo takes initial guess and constraints on parameters
#import optimize
#params, covar, info, mesg, ier = optimize.leastsq(func,guess,args = (points,vals,errs), full_output=True)
import mpfit
m = mpfit.mpfit(func, functkw=fa,
parinfo=fit['class'].parstart,
maxiter=1000, quiet=0)
print m.params, m.perror
if (m.status <= 0):
print 'error message = ', m.errmsg
condition = Numeric.zeros(len(data))
break
print m.params,m.perror
#fits = [{'vars':['zp','color1coeff','color1coeff2'],'parinfo':[{'value':p[0],'fixed':0},{'value':p[1],'fixed':0},{'value':p[2],'fixed':0},'function':phot_funct_secondorder,'fit_type':'no_airmass'}]
fit['class'].fitvars = {}
for ele in range(len(fit['class'].smodel)):
print ele, fit['class'].smodel
name = make_name(fit['class'].smodel[ele])
print ele, fit['class'].fitvars, name, m.params[ele]
fit['class'].fitvars[name] = m.params[ele]
fit['class'].fitvars[name + '_err'] = m.perror[ele]
perror = copy.copy(m.perror)
# Compute a 3 sigma rejection criterion
print m.params, data_rec[0], data[0]
#condition, redchisq = SigmaCond(params, data_save, data,
# airmass_save, airmass,
# color1_save, color1, color2_save, color2, err_save, err, sigmareject)
calcIllum(10000, 10000, 100, fit)
if len(data_save) > 1:
(mo_save, reddm) = fit['class'].calc_sigma(m.params, airmass_save, color1_save, color2_save, data_save, err_save, X_save, Y_save)
#reddm = (data-mo)/err
redchisq = Numeric.sqrt(Numeric.sum(Numeric.power(reddm, 2)) / (len(reddm) - 1))
dm = data_save-mo_save
#dm_save = data_save - mo_save
print len(data_save), len(mo_save)
dm_save = data_save - mo_save
mean = Numeric.sum(dm)/len(dm)
sigma = Numeric.sqrt(Numeric.sum(Numeric.power(mean-dm, 2)) / (len(dm) - 1))
# you can pick either
#condition = Numeric.less(Numeric.fabs(dm_save), float(sigmareject) * sigma)
condition = Numeric.less(Numeric.fabs(dm_save), float(sigmareject) * err_save)
else:
condition = Numeric.zeros(len(data_save))
print redchisq
# Keep everything (from the full data set!) that is within
# the 3 sigma criterion
#data_sig = Numeric.compress(condition, data_save)
data = Numeric.compress(condition, data_rec)
err = Numeric.compress(condition, err_save)
X = Numeric.compress(condition, X_save)
Y = Numeric.compress(condition, Y_save)
new_len = len(data)
if float(new_len)/float(save_len) < 0.5:
print "Rejected more than 50% of all measurements."
print "Aborting this fit."
break
# No change
if new_len == old_len:
print "Converged! (%d iterations)" % (i+1, )
print "Kept %d/%d stars." % (new_len, save_len)
break
#print params, perror, condition
meanerr = Numeric.sum(err_save)/len(err_save)
def make_name(name):
if len(name) > 1:
name = reduce(lambda x,y: x + 'T' + y,name)
else:
name = name[0]
return name
''' read in the photometric calibration and apply it to the data '''
def apply_photometric_calibration(SUPA,FLAT_TYPE):
from config_bonn import info
import utilities, Numeric, os
reload(utilities)
from utilities import *
dict = get_files(SUPA,FLAT_TYPE)
print dict.keys()
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
print dict['starcat']
import astropy, astropy.io.fits as pyfits
hdulist1 = pyfits.open(dict['starcat'])
#print hdulist1["STDTAB"].columns
table = hdulist1["STDTAB"].data
other_info = info[dict['filter']]
filters_info = make_filters_info([dict['filter']])
compband = filters_info[0][1] ## use the SDSS/other comparison band
color1which = other_info['color1']
print filters_info, compband
print dict['OBJNAME']
for key in dict.keys():
import string
if string.find(key,'color') != -1:
print key
#calib = get_calibrations_threesecond(dict['OBJNAME'],filters_info)
#print 'calib', calib
#raw_input()
model = convert_modelname_to_array('zpPcolor1') #dict['model_name%'+dict['filter']])
cols = [pyfits.Column(name=column.name, format=column.format,array=Numeric.array(0 + hdulist1["STDTAB"].data.field(column.name))) for column in hdulist1["STDTAB"].columns]
    data = color_std_correct(model,dict,table,dict['filter'],compband+'mag',color1which) # correct standard magnitude into the instrumental system -- at least get rid of the color term
cols.append(pyfits.Column(name='stdMag_corr', format='E',array=data))
cols.append(pyfits.Column(name='stdMagErr_corr', format='E',array=Numeric.array(0 + hdulist1["STDTAB"].data.field(compband+'err'))))
cols.append(pyfits.Column(name='stdMagColor_corr', format='E',array=Numeric.array(0 + hdulist1["STDTAB"].data.field(color1which))))
cols.append(pyfits.Column(name='stdMagClean_corr', format='E',array=Numeric.array(0 + hdulist1["STDTAB"].data.field('Clean'))))
type = 'star'
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params['OBJNAME']}
outcat = path + 'PHOTOMETRY/ILLUMINATION/sdssmatch__' + search_params['SUPA'] + '_' + type + '.cat'
print cols
hdu = pyfits.PrimaryHDU()
hdulist = pyfits.HDUList([hdu])
tbhu = pyfits.BinTableHDU.from_columns(cols)
hdulist.append(tbhu)
hdulist[1].header['EXTNAME']='OBJECTS'
os.system('rm ' + outcat)
hdulist.writeto( outcat )
print 'wrote out new cat'
save_exposure({'sdssmatch':outcat},SUPA,FLAT_TYPE)
def plot_color(color,data):
import numpy, math, pyfits, os
import copy
from ppgplot import *
pgbeg("/XTERM",1,1)
pgiden()
pgpanl(1,1)
from Numeric import *
x = copy.copy(color) #hdulist1["OBJECTS"].data.field(color1which)
y = copy.copy(data) #hdulist1["OBJECTS"].data.field(compband+'mag') - data
plotx = copy.copy(x)
ploty = copy.copy(y)
x.sort()
y.sort()
mediany = y[int(len(y)/2.)]
lowx=-2 #x[2]
highx=2 #x[-2]
lowy=mediany + 1.5
highy=mediany -1.5
pgswin(lowx,highx,lowy,highy)
plotx = array(plotx)
ploty = array(ploty)
#pylab.scatter(z,x)
pglab('Mag','Mag - Mag(Inst)')
#print plotx, ploty
pgpt(plotx,ploty,3)
pgbox()
pgend()
def hold():
if 0: #star['sdss']:
star_A = []
star_B = []
sigmas = []
for exp in star['supa files']:
row_num += 1
col_num = -1
rotation = exp['rotation']
#sigma = tab[str(rotation) + '$' + exp['name'] + '$MAGERR_AUTO'][star['table index']]
sigma = tab['SDSSstdMagErr_corr'][star['table index']]
for c in position_columns:
col_num += 1
first_column = True
for c in zp_columns:
col_num += 1
''' remember that the good magnitude does not have any zp dependence!!! '''
#if (first_column is not True and c['image'] == exp['name']) or c['image'] == 'sdss':
if c['image'] == 'sdss':
value = 1./sigma
star_A.append([row_num,col_num,value])
first_column = False
''' fit for the color term dependence '''
for c in color_columns:
col_num += 1
value = tab['SDSSstdMagColor_corr'][star['table index']]/sigma
star_A.append([row_num,col_num,value])
col_num += 1
''' magnitude column -- include the correct/common magnitude '''
value = 1./sigma
star_A.append([row_num,col_num+supa_num,value])
#value = (tab[str(rotation)+'$'+exp['name']+'$MAG_AUTO'][star['table index']] - tab['SDSSstdMag_corr'][star['table index']])/sigma
#print tab[str(rotation)+'$'+exp['name']+'$MAG_AUTO'][star['table index']], tab['SDSSstdMag_corr'][star['table index']]
data[rotation].append(tab[str(rotation)+'$'+exp['name']+'$MAG_AUTO'][star['table index']] - tab['SDSSstdMag_corr'][star['table index']])
magErr[rotation].append(tab['SDSSstdMagErr_corr'][star['table index']])
whichimage[rotation].append(exp['name'])
X[rotation].append(tab[str(rotation) + '$' + exp['name'] + '$Xpos_ABS'][star['table index']])
Y[rotation].append(tab[str(rotation) + '$' + exp['name'] + '$Ypos_ABS'][star['table index']])
color[rotation].append(tab['SDSSstdMagColor_corr'][star['table index']])
star_B.append([row_num,value])
sigmas.append(sigma)
inst.append({'type':'sdss','A_array':star_A, 'B_array':star_B, 'sigma_array': sigmas})
def save_fit(dict,CLUSTER=None,FILTER=None):
if CLUSTER!= None and FILTER!= None:
dict['CLUSTER'] = CLUSTER
dict['FILTER'] = FILTER
db2,c = connect_except()
db = 'fit_db'
#c.execute("DROP TABLE IF EXISTS fit_db")
command = "CREATE TABLE IF NOT EXISTS " + db + " ( id MEDIUMINT NOT NULL AUTO_INCREMENT, PRIMARY KEY (id))"
print command
c.execute(command)
from copy import copy
floatvars = {}
stringvars = {}
#copy array but exclude lists
import string
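    # any value containing a letter is treated as a string below; uppercase 'E'
    # is left out of the letter set so exponents like '1.5E3' still parse as floats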
letters = string.ascii_lowercase + string.ascii_uppercase.replace('E','') + '_' + '-'
for ele in dict.keys():
type = 'float'
for l in letters:
if string.find(str(dict[ele]),l) != -1:
type = 'string'
if type == 'float':
floatvars[ele] = str(float(dict[ele]))
elif type == 'string':
stringvars[ele] = dict[ele]
# make database if it doesn't exist
print 'floatvars', floatvars
print 'stringvars', stringvars
for column in stringvars:
try:
command = 'ALTER TABLE ' + db + ' ADD ' + column + ' varchar(240)'
c.execute(command)
except: nope = 1
for column in floatvars:
try:
command = 'ALTER TABLE ' + db + ' ADD ' + column + ' float(30)'
c.execute(command)
except: nope = 1
# insert new observation
CLUSTER = dict['CLUSTER']
FILTER = dict['FILTER']
c.execute("SELECT CLUSTER from " + db + " where CLUSTER = '" + CLUSTER + "' and FILTER = '" + FILTER + "'")
results = c.fetchall()
print results
if len(results) > 0:
print 'already added'
else:
command = "INSERT INTO " + db + " (CLUSTER,FILTER) VALUES ('" + dict['CLUSTER'] + "','" + dict['FILTER'] + "')"
print command
c.execute(command)
import commands
vals = ''
for key in stringvars.keys():
print key, stringvars[key]
vals += ' ' + key + "='" + str(stringvars[key]) + "',"
for key in floatvars.keys():
print key, floatvars[key]
vals += ' ' + key + "='" + floatvars[key] + "',"
vals = vals[:-1]
command = "UPDATE " + db + " set " + vals + " WHERE CLUSTER='" + dict['CLUSTER'] + "' AND FILTER='" + dict['FILTER'] + "'"
print command
c.execute(command)
print vals
#names = reduce(lambda x,y: x + ',' + y, [x for x in floatvars.keys()])
#values = reduce(lambda x,y: str(x) + ',' + str(y), [floatvars[x] for x in floatvars.keys()])
#names += ',' + reduce(lambda x,y: x + ',' + y, [x for x in stringvars.keys()])
#values += ',' + reduce(lambda x,y: x + ',' + y, ["'" + str(stringvars[x]) + "'" for x in stringvars.keys()])
#command = "INSERT INTO illumination_db (" + names + ") VALUES (" + values + ")"
#print command
#os.system(command)
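# Hedged sketch (not from the original code): the UPDATE assembled above by string
# concatenation can instead use MySQLdb parameter substitution, which handles the
# quoting of values. Column names still have to be interpolated, so they must come
# from a trusted source such as the floatvars/stringvars keys.
def save_fit_row(c, db, cluster, filt, values):
    assignments = ', '.join(['%s=%%s' % key for key in values.keys()])
    command = 'UPDATE ' + db + ' SET ' + assignments + ' WHERE CLUSTER=%s AND FILTER=%s'
    c.execute(command, list(values.values()) + [cluster, filt])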
def gather_exposures_all(filters=None):
#if not filters:
# filters = ['B','W-J-B','W-J-V','W-C-RC','W-C-IC','I','W-S-Z+']
import os, re
from glob import glob
dirs = glob(os.environ['subdir'] + '/TEST*')
print len(dirs)
for i in range(len(dirs)):
dir = dirs[i]
print 'dir',dir
subdirs = glob(dir + '/*')
print 'subdirs',subdirs
for subdir in subdirs:
try:
slash = re.split('/',subdir)[-1]
res = re.split('_',slash)
#print res
if len(res) > 1:
files = glob(subdir+'/SCIENCE/*fits')
#print files, subdir + '/SCIENCE/*fits'
if len(files)>0:
#search_params = initialize(filter,OBJNAME)
import os, re, bashreader, sys, string, utilities
from glob import glob
from copy import copy
#files = glob(searchstr)
files.sort()
exposures = {}
import MySQLdb, sys, os, re
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
for file in files:
if string.find(file,'wcs') == -1 and string.find(file,'.sub.fits') == -1: # and string.find(file,'I.fits') == -1:
res = re.split('_',re.split('/',file)[-1])
exp_name = res[0]
if not exposures.has_key(exp_name): exposures[exp_name] = {'images':[],'keywords':{}}
exposures[exp_name]['images'].append(file) # exp_name is the root of the image name
if len(exposures[exp_name]['keywords'].keys()) == 0: #not exposures[exp_name]['keywords'].has_key('ROTATION'): #if exposure does not have keywords yet, then get them -- this makes sure you only record each SUPA file once!!!
#exposures[exp_name]['keywords']['filter'] = filter
exposures[exp_name]['keywords']['file'] = file
res2 = re.split('/',file)
#for r in res2:
# if string.find(r,filter) != -1:
# print r
# exposures[exp_name]['keywords']['date'] = r.replace(filter + '_','')
# exposures[exp_name]['keywords']['fil_directory'] = r
# search_params['fil_directory'] = r
kws = utilities.get_header_kw(file,['CRVAL1','CRVAL2','ROTATION','OBJECT','GABODSID','CONFIG','EXPTIME','AIRMASS','INSTRUM','PPRUN','BADCCD','FILTER']) # return KEY/NA if not SUBARU
''' figure out PPRUN '''
import commands
readlink = commands.getoutput('readlink -f ' + file)
res = re.split('SUBARU/',readlink)
res = re.split('/',res[1])
kws['PPRUN'] = res[0]
''' figure out OBJNAME '''
res = re.split('SUBARU/',file)
print res, file
res = re.split('/',res[1])
print res
if res[0] == '': res = res[1:]
kws['OBJNAME'] = res[0]
print kws['OBJNAME'], 'OBJNAME'
''' figure out a way to break into SKYFLAT, DOMEFLAT '''
ppid = str(os.getppid())
command = 'dfits ' + file
file = commands.getoutput(command)
import string
if string.find(file,'SKYFLAT') != -1: exposures[exp_name]['keywords']['FLAT_TYPE'] = 'SKYFLAT'
elif string.find(file,'DOMEFLAT') != -1: exposures[exp_name]['keywords']['FLAT_TYPE'] = 'DOMEFLAT'
import string
file = re.split('\n',file)
for line in file:
print line
if string.find(line,'Flat frame:') != -1 and string.find(line,'illum') != -1:
import re
res = re.split('SET',line)
if len(res) > 1:
res = re.split('_',res[1])
set = res[0]
exposures[exp_name]['keywords']['FLAT_SET'] = set
res = re.split('illum',line)
res = re.split('\.',res[1])
smooth = res[0]
exposures[exp_name]['keywords']['SMOOTH'] = smooth
break
for kw in kws.keys():
exposures[exp_name]['keywords'][kw] = kws[kw]
exposures[exp_name]['keywords']['SUPA'] = exp_name
#exposures[exp_name]['keywords']['OBJNAME'] = OBJNAME
print exposures[exp_name]['keywords']
save_exposure(exposures[exp_name]['keywords'])
#raw_input()
except KeyboardInterrupt:
raise
except:
ppid_loc = str(os.getppid())
            import traceback
            traceback.print_exc(file=sys.stdout)
print sys.exc_info()
print 'something else failed',ppid, ppid_loc
return exposures
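# Illustrative parse of the dfits 'Flat frame:' header line handled above. The
# header text is made up; the real keyword layout may differ, but the SET/illum
# tokens are extracted the same way as in the loop.
def _example_flat_frame_parse():
    import re
    line = 'HISTORY Flat frame: SKYFLAT_SET12_illum3.fits'  # hypothetical line
    set_id = re.split('_', re.split('SET', line)[1])[0]      # -> '12'
    smooth = re.split(r'\.', re.split('illum', line)[1])[0]  # -> '3'
    return set_id, smooth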
def run_telarchive(ra,dec,objname):
from ephem import *
coord = Equatorial(str(ra/15.),str(dec))
ra = str(coord.get()[0]).replace(':',' ')
dec = str(coord.get()[1]).replace(':',' ')
print 'ra','dec',ra,dec
import commands, re, string
command = 'python dosearch.py --coords="' + ra + ' ' + dec + '" 6.0'
print command
out = commands.getoutput(command)
#i = open('ij','w')
#i.write(out)
#i.close()
#out = open('ij','r').read()
print out
res = re.split('\n',out)
print res
d = {}
for i in res:
res_t = re.split('\t',i)
if len(res_t) > 1:
if res_t[1] != '':
name = re.split('\s+',re.split(':',res_t[1])[0])[0]
d[name + '_info'] = ' '
if string.find(re.split(':',res_t[1])[1],'No data found') != -1:
d[name + '_data'] = 0
elif string.find(re.split(':',res_t[1])[0],'Sloan Digital') != -1:
d[name + '_data'] = 1
else:
print res_t[1]
a = re.split(':',res_t[1])[1]
print a
b = re.split('\(',a)[1]
c = re.split('\s+',b)[0]
d[name + '_data'] = c
else: d[name + '_info'] += res_t[2] + '; '
print objname, d
return d
def get_observations():
import MySQLdb, sys, os, re, time, utilities, pyfits
from copy import copy
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
db_keys = describe_db(c)
command = "CREATE TABLE IF NOT EXISTS telarchive_db ( id MEDIUMINT NOT NULL AUTO_INCREMENT, PRIMARY KEY (id))"
print command
#c.execute("DROP TABLE IF EXISTS telarchive_db")
c.execute(command)
keystop = ['PPRUN','ROTATION','OBJNAME']
list = reduce(lambda x,y: x + ',' + y, keystop)
command="SELECT * from illumination_db LEFT OUTER JOIN telarchive_db on telarchive_db.OBJNAME=illumination_db.OBJNAME where illumination_db.OBJNAME is not null and illumination_db.OBJNAME!='HDFN' and illumination_db.OBJNAME!='COSMOS' and telarchive_db.HST_data is NULL GROUP BY illumination_db.OBJNAME"
print command
c.execute(command)
results=c.fetchall()
for line in results:
dtop = {}
for i in range(len(db_keys)):
dtop[db_keys[i]] = str(line[i])
print dtop['CRVAL1'],dtop['CRVAL2'],dtop['OBJNAME']
dict = run_telarchive(float(dtop['CRVAL1']),dtop['CRVAL2'],dtop['OBJNAME'])
OBJNAME = dtop['OBJNAME']
dict['OBJNAME'] = OBJNAME
floatvars = {}
stringvars = {}
#copy array but exclude lists
import string
letters = string.ascii_lowercase + string.ascii_uppercase.replace('E','') + '_' + '-'
for ele in dict.keys():
print ele, dict[ele]
type = 'float'
for l in letters:
if string.find(str(dict[ele]),l) != -1 or dict[ele] == ' ':
type = 'string'
if type == 'float':
floatvars[ele] = str(float(dict[ele]))
elif type == 'string':
stringvars[ele] = dict[ele]
# make database if it doesn't exist
print 'floatvars', floatvars
print 'stringvars', stringvars
for column in stringvars:
try:
command = 'ALTER TABLE telarchive_db ADD ' + column + ' varchar(240)'
c.execute(command)
except: nope = 1
for column in floatvars:
try:
command = 'ALTER TABLE telarchive_db ADD ' + column + ' float(30)'
c.execute(command)
except: nope = 1
c.execute("SELECT OBJNAME from telarchive_db where OBJNAME = '" + OBJNAME + "'")
results = c.fetchall()
print results
if len(results) > 0:
print 'already added'
else:
command = "INSERT INTO telarchive_db (OBJNAME) VALUES ('" + OBJNAME + "')"
print command
c.execute(command)
import commands
vals = ''
for key in stringvars.keys():
print key, stringvars[key]
vals += ' ' + key + "='" + str(stringvars[key]) + "',"
for key in floatvars.keys():
print key, floatvars[key]
vals += ' ' + key + "='" + floatvars[key] + "',"
vals = vals[:-1]
command = "UPDATE telarchive_db set " + vals + " WHERE OBJNAME='" + OBJNAME + "'"
print command
c.execute(command)
if __name__ == '__main__':
import sys, os
tmpdir_root = sys.argv[1] + '/'
os.chdir(tmpdir_root)
tmpdir = tmpdir_root + '/tmp/'
os.system('mkdir -p ' + tmpdir)
astrom = 'solve-field'
if len(sys.argv)>2:
astrom = sys.argv[2]
select_analyze()
|
deapplegate/wtgpipeline
|
non_essentials/calc_test/calc_tmpsave.save.py
|
Python
|
mit
| 206,007
|
[
"Galaxy"
] |
f5f413f26d90cfb8e58991e33da24fc1ff1715694a640b2640fb2d9f2e3a1fa9
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX-License-Identifier: GPL-3.0+
# pylint: disable=no-init,invalid-name,too-many-locals,too-few-public-methods
from __future__ import (absolute_import, division, print_function)
import systemtesting
from mantid.simpleapi import *
# These tests check the correctness of the structure factor calculation for some common crystal structures.
# All structure factors for comparison have been calculated using VESTA 3.2.1, which is described in the following
# publication:
#
# K. Momma and F. Izumi, "VESTA 3 for three-dimensional visualization of crystal,
# volumetric and morphology data," J. Appl. Crystallogr., 44, 1272-1276 (2011)
#
# http://dx.doi.org/10.1107/S0021889811038970
#
# All crystal structure data have been acquired from http://www.crystallography.net/. DOIs of the original
# papers with the published structures are given in the tests.
#
# Isotropic thermal parameters are rounded or arbitrary (the tests are meant for checking the calculations only).
class ReflectionCheckingTest(systemtesting.MantidSystemTest):
def runTest(self):
pass
def getPeakIndex(self, peakTable, hkl):
hklString = ' '.join([str(x) for x in hkl])
for i in range(peakTable.rowCount()):
r = peakTable.row(i)
if r['HKL'] == hklString:
return i
return -1
def checkReflections(self, peakTable, data, structureFactorPrecision = 1e-5):
for reference in data:
idx = self.getPeakIndex(peakTable, reference[0])
# Make sure the reflection exists.
self.assertTrue(idx != -1)
currentPeak = peakTable.row(idx)
self.assertEqual([int(x) for x in currentPeak['HKL'].split()], reference[0])
self.assertDelta(float(currentPeak['d']), reference[1], 1e-4)
fSquaredReference = reference[2] ** 2 * reference[3]
print(reference[0], fSquaredReference, float(currentPeak['Intensity']))
self.assertDelta(float(currentPeak['Intensity']) / fSquaredReference, 1.0, structureFactorPrecision)
class POLDICreatePeaksFromCellTestSiO2(ReflectionCheckingTest):
"""Structure factor check for:
SiO2, 10.1107/S0108768105005240"""
data = [
([1, 0, 0], 4.25588, 8.27544, 6),
([1, 0, -1], 3.34393, 22.1494, 6),
([0, 0, 3], 1.80193, 8.70574, 2),
([2, 2, 0], 1.22857, 14.4884, 3),
([4, -1, 4], 0.88902, 9.14321, 6)
]
def runTest(self):
peaks_SiO2 = PoldiCreatePeaksFromCell(
SpaceGroup="P 32 2 1",
Atoms="Si 0.4723 0.0 2/3 1.0 0.0075; O 0.416 0.2658 0.7881 1.0 0.0175",
a=4.91427, c=5.4058, LatticeSpacingMin=0.885)
self.assertEqual(peaks_SiO2.rowCount(), 118)
self.checkReflections(peaks_SiO2, self.data)
class POLDICreatePeaksFromCellTestAl2O3(ReflectionCheckingTest):
"""Structure factor check for:
Al2O3, 10.1107/S0021889890002382"""
data = [
([1, 0, -2], 3.481144, 21.873, 6),
([1, 0, 4], 2.551773, 23.6714, 6),
([0, 0, 6], 2.165933, 68.8749, 2),
([5, -2, -5], 0.88880, 23.6113, 12)
]
def runTest(self):
peaks_Al2O3 = PoldiCreatePeaksFromCell(
SpaceGroup="R -3 c",
Atoms="Al 0 0 0.35216 1.0 0.009; O 0.30668 0 1/4 1.0 0.0125",
a=4.7605, c=12.9956, LatticeSpacingMin=0.885)
self.assertEqual(peaks_Al2O3.rowCount(), 44)
self.checkReflections(peaks_Al2O3, self.data)
class POLDICreatePeaksFromCellTestFeTiO3(ReflectionCheckingTest):
"""Structure factor check for:
FeTiO3, 10.1007/s00269-007-0149-7
Note: Ti replaced by Zr"""
data = [
([0, 0, 3], 4.6970, 2.0748, 2),
([1, 0, 1], 4.20559, 1.60512, 6),
([1, 0, 4], 2.75153, 76.1855, 6),
([5, -4, 6], 0.88986, 100.244, 6)
]
def runTest(self):
peaks_FeTiO3 = PoldiCreatePeaksFromCell(
SpaceGroup="R -3",
Atoms="Fe 0 0 0.35543 1.0 0.005; Zr 0 0 0.14643 1.0 0.004; O 0.31717 0.02351 0.24498 1.0 0.006",
a=5.0881, c=14.091, LatticeSpacingMin=0.885)
self.assertEqual(peaks_FeTiO3.rowCount(), 108)
self.checkReflections(peaks_FeTiO3, self.data, 6e-5)
class POLDICreatePeaksFromCellTestCO(ReflectionCheckingTest):
"""Structure factor check for:
CO, 10.1007/BF01339658
Notes: Non-centrosymmetric, cubic, negative coordinates"""
data = [
([1, 1, 0], 3.98101, 1.93291, 12),
([1, 1, -1], 3.25048, 40.6203, 4),
([2, 0, 0], 2.815, 37.248, 6),
([6, 2, 0], 0.89018, 9.45489, 12)
]
def runTest(self):
peaks_CO = PoldiCreatePeaksFromCell(
SpaceGroup="P 21 3",
Atoms="C -0.042 -0.042 -0.042 1.0 0.0125; O 0.067 0.067 0.067 1.0 0.0125",
a=5.63, LatticeSpacingMin=0.885)
self.assertEqual(peaks_CO.rowCount(), 91)
self.checkReflections(peaks_CO, self.data, 1e-5)
class POLDICreatePeaksFromCellTestBetaQuartz(ReflectionCheckingTest):
"""Structure factor check for:
SiO2 (beta-quartz, high temperature), 10.1127/ejm/2/1/0063
Notes: Non-centrosymmetric, hexagonal, with coordinate 1/6"""
data = [
([1, 0, 0], 4.32710, 7.74737, 6),
([1, 0, 1], 3.38996, 19.7652, 12),
([1, 0, 2], 2.30725, 2.96401, 12),
([1, 0, 6], 0.88968, 3.15179, 12)
]
def runTest(self):
peaks_betaSiO2 = PoldiCreatePeaksFromCell(
SpaceGroup="P 62 2 2",
Atoms="Si 1/2 0 0 1.0 0.025; O 0.41570 0.20785 1/6 1.0 0.058",
a=4.9965, c=5.4546, LatticeSpacingMin=0.885)
self.assertEqual(peaks_betaSiO2.rowCount(), 65)
self.checkReflections(peaks_betaSiO2, self.data, 1e-5)
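# Cross-check (illustrative, not part of the test suite): for a hexagonal cell,
# 1/d^2 = 4/3*(h^2 + h*k + k^2)/a^2 + l^2/c^2. For the beta-quartz cell above
# (a=4.9965, c=5.4546), (1,0,0) gives d = a*sqrt(3)/2 = 4.32710, matching the
# first tabulated reflection.
def hexagonal_d(h, k, l, a, c):
    import math
    inv_d2 = 4.0/3.0*(h*h + h*k + k*k)/(a*a) + (l*l)/(c*c)
    return 1.0/math.sqrt(inv_d2)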
|
mganeva/mantid
|
Testing/SystemTests/tests/analysis/POLDICreatePeaksFromCellTest.py
|
Python
|
gpl-3.0
| 6,042
|
[
"CRYSTAL"
] |
ad37aaf5b423170b9b734efbb330e35299544a98dba690fa663ec2c6a810b1f2
|
# -*- coding: utf-8 -*-
import datetime
from time import sleep
from lettuce import *
from django.utils.datastructures import SortedDict
from rapidsms.contrib.locations.models import *
from survey.features.page_objects.aggregates import AggregateStatusPage, DownloadExcelPage, InvestigatorReportPage
from survey.features.page_objects.survey_completion_rates import SurveyCompletionRatesPage
from survey.models import Survey, EnumerationArea, HouseholdMemberGroup
from survey.models.batch import Batch
from survey.models.households import Household, HouseholdMember
from survey.models.investigator import Investigator
from survey import investigator_configs
@step(u'And I have 2 batches with one open')
def and_i_have_2_batches_with_one_open(step):
world.batch_1 = Batch.objects.create(order=1, name="Batch A", survey=world.survey_1)
world.batch_2 = Batch.objects.create(order=2, name="Batch B", survey=world.survey_2)
world.kampala_county = Location.objects.get(name="Kampala County")
world.someother_county = Location.objects.create(name="Some County", tree_parent=world.kampala_county.tree_parent)
world.batch_1.open_for_location(world.kampala_county.tree_parent)
@step(u'And I have eas in the lowest location')
def and_i_have_eas_in_the_lowest_location(step):
world.ea = EnumerationArea.objects.create(name="EA", survey=world.survey_1)
world.ea.locations.add(world.kampala_village)
@step(u'And one household has completed that open batch')
def and_one_household_has_completed_that_open_batch(step):
world.household_1.completed_batches.get_or_create(batch=world.batch_1)
@step(u'And I visit aggregate status page')
def and_i_visit_aggregate_status_page(step):
world.page = AggregateStatusPage(world.browser)
world.page.visit()
@step(u'Then I should see an option to select location hierarchically')
def then_i_should_see_an_option_to_select_location_hierarchically(step):
world.page.choose_location({'district': 'Kampala', 'county': 'Kampala County'})
@step(u'And I should see an option to select batch')
def and_i_should_see_an_option_to_select_batch(step):
world.page.check_if_batches_present([world.batch_1])
@step(u'And I should see a get status button')
def and_i_should_see_a_get_status_button(step):
world.page.check_get_status_button_presence()
@step(u'And I have 2 investigators with households')
def and_i_have_2_investigators_with_households(step):
investigator = Investigator.objects.create(name="Rajini", mobile_number="123", location=world.kampala_county)
investigator_2 = Investigator.objects.create(name="Batman", mobile_number="1234", location=world.someother_county)
uid_counter = 0
for index in range(investigator_configs.NUMBER_OF_HOUSEHOLD_PER_INVESTIGATOR):
Household.objects.create(investigator = investigator, uid=uid_counter+index)
Household.objects.create(investigator = investigator_2, uid=uid_counter+1+index)
uid_counter = uid_counter + 2
world.investigator = investigator
world.investigator_2 = investigator_2
@step(u'And I choose a location and an open batch')
def and_i_choose_a_location_and_an_open_batch(step):
locations = SortedDict()
locations['district'] = 'Kampala'
locations['county'] = 'Kampala County'
world.page.choose_location(locations)
world.page.choose_batch(world.batch_1)
@step(u'And I change my mind to select all districts')
def and_i_change_my_mind_to_select_all_districts(step):
world.page.select_all_district()
@step(u'And I click get status button')
def and_i_click_get_status_button(step):
world.page.submit()
@step(u'And I should see all districts as location selected')
def and_i_should_see_all_districts_location_selected(step):
world.page.see_all_districts_location_selected()
@step(u'Then I should see number of households and clusters completed and pending')
def then_i_should_see_number_of_households_and_clusters_completed_and_pending(step):
world.page.assert_status_count(pending_households=20, completed_housesholds=0, pending_clusters=2, completed_clusters=0)
@step(u'And I should see a list of investigators with corresponding phone numbers and pending households')
def and_i_should_see_a_list_of_investigators_with_corresponding_phone_numbers_and_pending_households(step):
world.page.check_presence_of_investigators(world.investigator, world.investigator_2)
@step(u'And I choose a location and a closed batch')
def and_i_choose_a_location_and_a_closed_batch(step):
world.page.choose_location({'district': 'Kampala'})
world.page.choose_batch(world.batch_2)
@step(u'And I should see a message that says that this batch is currently closed')
def and_i_should_see_a_message_that_says_that_this_batch_is_currently_closed(step):
world.page.assert_presence_of_batch_is_closed_message()
@step(u'And I visit download excel page')
def and_i_visit_download_excel_page(step):
world.page = DownloadExcelPage(world.browser)
world.page.visit()
@step(u'And I visit district aggregate page')
def and_i_visit_district_aggregate_page(step):
world.page = SurveyCompletionRatesPage(world.browser)
world.page.visit()
@step(u'Then I should see a table for completion rates')
def then_i_should_see_a_table_for_completion_rates(step):
world.page.see_completion_rates_table()
@step(u'And I should see descendants in the table')
def and_i_should_see_descendants_in_the_table(step):
world.page.is_text_present(world.kampala_subcounty.name)
@step(u'When I click on descendant name')
def when_i_click_on_descendant_name(step):
world.page.click_link_by_text(world.kampala_subcounty.name)
@step(u'Then I should see status page for that location')
def then_i_should_see_status_page_for_that_location(step):
world.page.see_completion_rates_table()
world.page.is_text_present(world.kampala_parish.name)
@step(u'And I choose ea and an open batch')
def and_i_choose_ea_and_an_open_batch(step):
locations = SortedDict()
locations['district'] = world.kampala_district.name
locations['county'] = world.kampala_county.name
locations['subcounty'] = world.kampala_subcounty.name
locations['parish'] = world.kampala_parish.name
world.page.choose_location(locations)
world.page.choose_batch(world.batch_1)
world.page.choose_ea(world.ea)
@step(u'Then I should see a table for household completion rates')
def then_i_should_see_a_table_for_household_completion_rates(step):
world.page.see_houdehold_completion_table()
@step(u'And I should see household details text')
def and_i_should_see_household_details_text(step):
world.page.is_text_present("Survey Completion by household in %s EA" % world.ea.name)
world.page.is_text_present("%s" % world.household_1.uid)
world.page.is_text_present("%s" % world.household_1.household_member.all().count())
@step(u'And I should see investigator details text')
def and_i_should_see_investigator_details_text(step):
world.page.is_text_present('Investigator: %s(%s)' % (world.investigator.name, world.investigator.mobile_number))
@step(u'And I have an investigator and households')
def and_i_have_an_investigator_and_households(step):
world.batch = Batch.objects.create(survey=world.survey_1, name="Haha")
world.investigator = Investigator.objects.create(name="some_investigator", mobile_number="123456784", ea=world.ea)
world.household_1 = Household.objects.create(investigator=world.investigator, uid=101, ea=world.ea, survey=world.survey_1)
world.household_2 = Household.objects.create(investigator=world.investigator, uid=102, ea=world.ea, survey=world.survey_1)
world.member_2 = HouseholdMember.objects.create(household=world.household_2,
date_of_birth=datetime.datetime(2000, 02, 02))
@step(u'And I should see percent completion')
def and_i_should_see_percent_completion(step):
world.page.is_text_present('Percent Completion: 50')
@step(u'And I have 2 surveys with one batch each')
def and_i_have_2_surveys_with_one_batch_each(step):
world.batch_1 = Batch.objects.create(name='batch1', order=1, survey=world.survey_1)
world.batch_2 = Batch.objects.create(name='batch2', order=1, survey=world.survey_2)
@step(u'When I select survey 2 from survey list')
def when_i_select_survey_2_from_survey_list(step):
world.page.select('survey',[world.survey_2.id])
@step(u'Then I should see batch2 in batch list')
def then_i_should_see_batch2_in_batch_list(step):
world.page.see_select_option([world.batch_2.name], 'batch')
@step(u'And I should not see batch1 in batch list')
def and_i_should_not_see_batch1_in_batch_list(step):
world.page.option_not_present([world.batch_1.name], 'batch')
@step(u'When I select survey 1 from survey list')
def when_i_select_survey_1_from_survey_list(step):
world.page.select('survey', [world.survey_1.id])
@step(u'Then I should see batch1 in batch list')
def then_i_should_see_batch1_in_batch_list(step):
world.page.see_select_option([world.batch_1.name], 'batch')
@step(u'And I should not see batch2 in batch list')
def and_i_should_not_see_batch2_in_batch_list(step):
world.page.option_not_present([world.batch_2.name], 'batch')
@step(u'And I should see title message')
def and_i_should_see_title_message(step):
world.page.is_text_present('Survey Completion by Region/District')
@step(u'When I visit investigator report page')
def when_i_visit_investigator_report_page(step):
world.page = InvestigatorReportPage(world.browser)
world.page.visit()
@step(u'Then I should see title-text message')
def then_i_should_see_title_text_message(step):
world.page.is_text_present('Choose survey to get investigators who completed the survey')
@step(u'And I should see dropdown with two surveys')
def and_i_should_see_dropdown_with_two_surveys(step):
world.page.see_select_option([world.survey_1.name, world.survey_2.name], 'survey')
@step(u'And I should see generate report button')
def and_i_should_see_generate_report_button(step):
assert world.browser.find_by_css("#download-investigator-form")[0].find_by_tag('button')[0].text == "Generate Report"
@step(u'And I have 100 locations')
def and_i_have_100_locations(step):
country = LocationType.objects.create(name="Country", slug="country")
district = LocationType.objects.create(name="District", slug="district")
world.uganda = Location.objects.create(name="uganda", type=country)
for i in xrange(100):
Location.objects.create(name="name"+str(i), tree_parent=world.uganda, type=district)
@step(u'Then I should see district completion table paginated')
def then_i_should_see_district_completion_table_paginated(step):
world.page.validate_pagination()
@step(u'And I have one batch open in those locations')
def and_i_have_one_batch_open_in_those_locations(step):
world.batch_12 = Batch.objects.create(order=12, name="Batch A", survey=world.survey_1)
world.batch_12.open_for_location(world.uganda)
@step(u'When I select one of the survey')
def when_i_select_one_of_the_survey(step):
world.page.see_select_option([world.survey_1.name, world.survey_2.name], 'survey')
@step(u'Then I should batches in that survey')
def then_i_should_batches_in_that_survey(step):
world.page.validate_select_option(world.batch_1)
@step(u'And I click generate report button')
def and_i_click_generate_report_button(step):
world.page.find_by_css("#generate_report", "Generate Report")
@step(u'And I have three surveys')
def and_i_have_three_surveys(step):
world.survey_1 = Survey.objects.create(name="Haha Survey")
world.survey_2 = Survey.objects.create(name="Hoho Survey")
@step(u'And I have batches in those surveys')
def and_i_have_batches_in_those_surveys(step):
world.batch_1 = Batch.objects.create(order=1, name="Batch A haha", survey=world.survey_1)
world.batch_2 = Batch.objects.create(order=2, name="Batch A hoho", survey=world.survey_2)
@step(u'Then I should only see the batches in that survey')
def then_i_should_only_see_the_batches_in_that_survey(step):
world.page.see_select_option(['All', str(world.batch_2.name)], 'batch')
@step(u'When I choose a batch in that survey')
def when_i_choose_a_batch_in_that_survey(step):
world.page.select('batch', [world.batch_2.id])
@step(u'Then I should be able to export the responses for that batch')
def then_i_should_be_able_to_export_the_responses_for_that_batch(step):
world.page.find_by_css("#export_excel", "Export to spreadsheet")
@step(u'When I select one of the two surveys')
def when_i_select_one_of_the_two_surveys(step):
world.page.select('survey', [str(world.survey_2.id)])
@step(u'And I have general member group')
def and_i_have_general_member_group(step):
HouseholdMemberGroup.objects.create(order=1, name="GENERAL")
|
antsmc2/mics
|
survey/features/aggregates-steps.py
|
Python
|
bsd-3-clause
| 12,809
|
[
"VisIt"
] |
068ff9a38d28392f75097894ebca695025a9a1ffe901e0a090a05a260b4bab62
|
#!/usr/bin/env python
__author__ = 'Mike McCann,Duane Edgington,Reiko Michisaki'
__copyright__ = '2013'
__license__ = 'GPL v3'
__contact__ = 'duane at mbari.org'
__doc__ = '''
loader for ESP CANON activities in September 2013
Mike McCann; Modified by Duane Edgington and Reiko Michisaki
MBARI 02 September 2013
@var __date__: Date of last svn commit
@undocumented: __doc__ parser
@status: production
@license: GPL
'''
import os
import sys
import datetime # needed for glider data
import time # for startdate, enddate args
os.environ['DJANGO_SETTINGS_MODULE']='settings'
project_dir = os.path.dirname(__file__)
# the next line makes it possible to find CANON
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../")) # this makes it possible to find CANON, one directory up
from CANON import CANONLoader
# building input data sources object
from socket import gethostname
hostname=gethostname()
print hostname
if hostname=='odss-test.shore.mbari.org':
cl = CANONLoader('stoqs_september2011', 'CANON - September 2011')
else:
cl = CANONLoader('stoqs_september2013', 'CANON - September 2013')
# default location of thredds and dods data:
cl.tdsBase = 'http://odss.mbari.org/thredds/'
cl.dodsBase = cl.tdsBase + 'dodsC/'
# Set start and end dates for mooring, twice per day. In the morning and afternoon.
t =time.strptime("2013-09-10 0:01", "%Y-%m-%d %H:%M")
##startdate=t[:6]
##ts=time.time()-(13*60*60)
##st=datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M')
##t=time.strptime(st,"%Y-%m-%d %H:%M")
startdate=t[:6]
t =time.strptime("2013-10-29 0:01", "%Y-%m-%d %H:%M")
enddate=t[:6]
print startdate, enddate
######################################################################
# ESP MOORINGS
######################################################################
cl.bruce_moor_base = cl.dodsBase + 'CANON_september2013/Platforms/Moorings/ESP_Bruce/NetCDF/'
cl.bruce_moor_files = ['Bruce_ctd.nc']
cl.bruce_moor_parms = [ 'TEMP','PSAL','chl','xmiss','oxygen','beamc',
]
cl.bruce_moor_startDatetime = datetime.datetime(*startdate[:])
cl.bruce_moor_endDatetime = datetime.datetime(*enddate[:])
cl.mack_moor_base = cl.dodsBase + 'CANON_september2013/Platforms/Moorings/ESP_Mack/NetCDF/'
cl.mack_moor_files = ['Mack_ctd.nc']
cl.mack_moor_parms = [ 'TEMP','PSAL','chl','xmiss','oxygen','beamc',
]
cl.mack_moor_startDatetime = datetime.datetime(*startdate[:])
cl.mack_moor_endDatetime = datetime.datetime(*enddate[:])
cl.process_command_line()
if cl.args.test:
cl.loadBruceMoor(stride=1)
cl.loadMackMoor(stride=1)
elif cl.args.optimal_stride:
cl.loadBruceMoor(stride=1)
cl.loadMackMoor(stride=1)
else:
cl.loadBruceMoor(stride=1)
cl.loadMackMoor(stride=1)
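# Minimal standalone illustration of the date pattern used above: time.strptime
# returns a 9-field struct_time, and the first six fields (year..second) can be
# splatted directly into datetime.datetime.
import time, datetime
_t = time.strptime("2013-09-10 0:01", "%Y-%m-%d %H:%M")
_startdate = _t[:6]
assert datetime.datetime(*_startdate) == datetime.datetime(2013, 9, 10, 0, 1)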
|
josephmfaulkner/stoqs
|
stoqs/loaders/CANON/esp_loadsep2013.py
|
Python
|
gpl-3.0
| 2,775
|
[
"NetCDF"
] |
9101d62a0e2108c407237da9cf40693f296ccb642471de8b97a8b678dba82df6
|
"""
Module simplifying manipulation of XML described at
http://libvirt.org/formatdomain.html
"""
import logging
from autotest.client.shared import error
from virttest import xml_utils
from virttest.libvirt_xml import base, accessors, xcepts
from virttest.libvirt_xml.devices import librarian
class VMXMLDevices(list):
"""
List of device instances from classes handed out by librarian.get()
"""
@staticmethod
def __type_check__(other):
try:
# Raise error if object isn't dict-like or doesn't have key
device_tag = other['device_tag']
# Check that we have support for this type
librarian.get(device_tag)
except (AttributeError, TypeError, xcepts.LibvirtXMLError):
# Required to always raise TypeError for list API in VMXML class
raise TypeError("Unsupported item type: %s" % str(type(other)))
def __setitem__(self, key, value):
self.__type_check__(value)
super(VMXMLDevices, self).__setitem__(key, value)
return self
def append(self, value):
self.__type_check__(value)
super(VMXMLDevices, self).append(value)
return self
def extend(self, iterable):
# Make sure __type_check__ happens
for item in iterable:
self.append(item)
return self
def by_device_tag(self, tag):
result = VMXMLDevices()
for device in self:
if device.device_tag == tag:
result.append(device)
return result
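# A minimal usage sketch (hypothetical; not part of the upstream module).
# VMXMLDevices only accepts items whose 'device_tag' is supported by
# librarian.get(); anything else raises TypeError, which keeps the list
# type-safe through append/extend/__setitem__.
def _vmxmldevices_sketch(disk_device):
    """'disk_device' is assumed to be a librarian-backed device instance,
    e.g. one created via librarian.get('disk')()."""
    devices = VMXMLDevices()
    devices.append(disk_device)           # passes __type_check__
    # devices.append('not-a-device')      # would raise TypeError
    return devices.by_device_tag('disk')  # filtered VMXMLDevices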
class VMXMLBase(base.LibvirtXMLBase):
"""
Accessor methods for VMXML class properties (items in __slots__)
Properties:
hypervisor_type: string, hypervisor type name
get: return domain's type attribute value
set: change domain type attribute value
del: raise xcepts.LibvirtXMLError
vm_name: string, name of the vm
get: return text value of name tag
set: set text value of name tag
del: raise xcepts.LibvirtXMLError
uuid: string, uuid string for vm
get: return text value of uuid tag
set: set text value for (new) uuid tag (unvalidated)
del: remove uuid tag
vcpu, max_mem, current_mem: integers
get: returns integer
set: set integer
del: removes tag
dumpcore: string, control guest OS memory dump
get: return text value
set: set 'on' or 'off' for guest OS memory dump
del: removes tag
numa: dictionary
get: return dictionary of numatune/memory attributes
set: set numatune/memory attributes from dictionary
del: remove numatune/memory tag
on_poweroff: string, action to take when the guest requests a poweroff
get: returns text value of on_poweroff tag
            set: set text of on_poweroff tag
del: remove on_poweroff tag
on_reboot: string, action to take when the guest requests a reboot
get: returns text value of on_reboot tag
            set: set text of on_reboot tag
del: remove on_reboot tag
on_crash: string, action to take when the guest crashes
get: returns text value of on_crash tag
            set: set text of on_crash tag
del: remove on_crash tag
devices: VMXMLDevices (list-like)
get: returns VMXMLDevices instance for all devices
set: Define all devices from VMXMLDevices instance
del: remove all devices
cputune: VMCPUTuneXML
get: return VMCPUTuneXML instance for the domain.
set: Define cputune tag from a VMCPUTuneXML instance.
del: remove cputune tag
cpu: VMCPUXML
get: return VMCPUXML instance for the domain.
set: Define cpu tag from a VMCPUXML instance.
del: remove cpu tag
current_vcpu: string, 'current' attribute of vcpu tag
get: return a string for 'current' attribute of vcpu
set: change 'current' attribute of vcpu
del: remove 'current' attribute of vcpu
placement: string, 'placement' attribute of vcpu tag
get: return a string for 'placement' attribute of vcpu
set: change 'placement' attribute of vcpu
del: remove 'placement' attribute of vcpu
emulatorpin: string, cpuset value (see man virsh: cpulist)
get: return text value of cputune/emulatorpin attributes
set: set cputune/emulatorpin attributes from string
del: remove cputune/emulatorpin tag
features: VMFeaturesXML
get: return VMFeaturesXML instances for the domain.
set: define features tag from a VMFeaturesXML instances.
del: remove features tag
mem_backing: VMMemBackingXML
get: return VMMemBackingXML instances for the domain.
set: define memoryBacking tag from a VMMemBackingXML instances.
del: remove memoryBacking tag
"""
# Additional names of attributes and dictionary-keys instances may contain
__slots__ = ('hypervisor_type', 'vm_name', 'uuid', 'vcpu', 'max_mem',
'current_mem', 'dumpcore', 'numa', 'devices', 'seclabel',
'cputune', 'placement', 'current_vcpu', 'os', 'cpu',
'pm', 'on_poweroff', 'on_reboot', 'on_crash', 'features',
'mb')
__uncompareable__ = base.LibvirtXMLBase.__uncompareable__
__schema_name__ = "domain"
def __init__(self, virsh_instance=base.virsh):
accessors.XMLAttribute(property_name="hypervisor_type",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='domain',
attribute='type')
accessors.XMLElementText(property_name="vm_name",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='name')
accessors.XMLElementText(property_name="uuid",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='uuid')
accessors.XMLElementInt(property_name="vcpu",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='vcpu')
accessors.XMLAttribute(property_name="current_vcpu",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='vcpu',
attribute='current')
accessors.XMLAttribute(property_name="placement",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='vcpu',
attribute='placement')
accessors.XMLElementInt(property_name="max_mem",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='memory')
accessors.XMLAttribute(property_name="dumpcore",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='memory',
attribute='dumpCore')
accessors.XMLElementInt(property_name="current_mem",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='currentMemory')
accessors.XMLElementNest(property_name='os',
libvirtxml=self,
parent_xpath='/',
tag_name='os',
subclass=VMOSXML,
subclass_dargs={
'virsh_instance': virsh_instance})
accessors.XMLElementDict(property_name="numa",
libvirtxml=self,
forbidden=None,
parent_xpath='numatune',
tag_name='memory')
accessors.XMLElementNest(property_name='cputune',
libvirtxml=self,
parent_xpath='/',
tag_name='cputune',
subclass=VMCPUTuneXML,
subclass_dargs={
'virsh_instance': virsh_instance})
accessors.XMLElementNest(property_name='cpu',
libvirtxml=self,
parent_xpath='/',
tag_name='cpu',
subclass=VMCPUXML,
subclass_dargs={
'virsh_instance': virsh_instance})
accessors.XMLElementNest(property_name='pm',
libvirtxml=self,
parent_xpath='/',
tag_name='pm',
subclass=VMPMXML,
subclass_dargs={
'virsh_instance': virsh_instance})
accessors.XMLElementText(property_name="on_poweroff",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='on_poweroff')
accessors.XMLElementText(property_name="on_reboot",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='on_reboot')
accessors.XMLElementText(property_name="on_crash",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='on_crash')
accessors.XMLElementNest(property_name='features',
libvirtxml=self,
parent_xpath='/',
tag_name='features',
subclass=VMFeaturesXML,
subclass_dargs={
'virsh_instance': virsh_instance})
accessors.XMLElementNest(property_name='mb',
libvirtxml=self,
parent_xpath='/',
tag_name='memoryBacking',
subclass=VMMemBackingXML,
subclass_dargs={
'virsh_instance': virsh_instance})
super(VMXMLBase, self).__init__(virsh_instance=virsh_instance)
def get_devices(self, device_type=None):
"""
Put all nodes of devices into a VMXMLDevices instance.
"""
devices = VMXMLDevices()
all_devices = self.xmltreefile.find('devices')
if device_type is not None:
device_nodes = all_devices.findall(device_type)
else:
device_nodes = all_devices
for node in device_nodes:
device_tag = node.tag
device_class = librarian.get(device_tag)
new_one = device_class.new_from_element(node,
virsh_instance=self.virsh)
devices.append(new_one)
return devices
def set_devices(self, value):
"""
Define devices based on contents of VMXMLDevices instance
"""
value_type = type(value)
if not issubclass(value_type, VMXMLDevices):
raise xcepts.LibvirtXMLError("Value %s Must be a VMXMLDevices or "
"subclass not a %s"
% (str(value), str(value_type)))
# Start with clean slate
exist_dev = self.xmltreefile.find('devices')
if exist_dev is not None:
self.del_devices()
if len(value) > 0:
devices_element = xml_utils.ElementTree.SubElement(
self.xmltreefile.getroot(), 'devices')
for device in value:
# Separate the element from the tree
device_element = device.xmltreefile.getroot()
devices_element.append(device_element)
self.xmltreefile.write()
def del_devices(self):
"""
Remove all devices
"""
self.xmltreefile.remove_by_xpath('/devices')
self.xmltreefile.write()
def get_seclabel(self):
"""
        Return a list of seclabel attribute dicts or raise LibvirtXMLError
        :return: list of dicts, one per seclabel, containing its attributes
                 and child element texts; raises LibvirtXMLError if no
                 seclabel tag exists in the xml.
"""
__children_list__ = ['label', 'baselabel', 'imagelabel']
seclabel_node = self.xmltreefile.findall("seclabel")
# no seclabel tag found in xml.
if seclabel_node == []:
raise xcepts.LibvirtXMLError("Seclabel for this domain does not "
"exist")
seclabels = []
for i in range(len(seclabel_node)):
seclabel = dict(seclabel_node[i].items())
for child_name in __children_list__:
child_node = seclabel_node[i].find(child_name)
if child_node is not None:
seclabel[child_name] = child_node.text
seclabels.append(seclabel)
return seclabels
def set_seclabel(self, seclabel_dict_list):
"""
        Set seclabel(s) of the vm. Delete all existing seclabels, then create
        new ones in the xmltreefile using the dict values from the given
        seclabel_dict_list.
"""
        __attributes_list__ = ['type', 'model', 'relabel']
__children_list__ = ['label', 'baselabel', 'imagelabel']
# check the type of seclabel_dict_list and value.
if not isinstance(seclabel_dict_list, list):
raise xcepts.LibvirtXMLError("seclabel_dict_list should be a "
"instance of list, but not a %s.\n"
% type(seclabel_dict_list))
for seclabel_dict in seclabel_dict_list:
if not isinstance(seclabel_dict, dict):
raise xcepts.LibvirtXMLError("value in seclabel_dict_list"
"should be a instance of dict "
"but not a %s.\n"
% type(seclabel_dict))
seclabel_nodes = self.xmltreefile.findall("seclabel")
if seclabel_nodes is not None:
for i in range(len(seclabel_nodes)):
self.del_seclabel()
for i in range(len(seclabel_dict_list)):
seclabel_node = xml_utils.ElementTree.SubElement(
self.xmltreefile.getroot(),
"seclabel")
for key, value in seclabel_dict_list[i].items():
if key in __children_list__:
child_node = seclabel_node.find(key)
if child_node is None:
child_node = xml_utils.ElementTree.SubElement(
seclabel_node,
key)
child_node.text = value
                elif key in __attributes_list__:
seclabel_node.set(key, value)
else:
continue
self.xmltreefile.write()
def del_seclabel(self):
"""
Remove the seclabel tag from a domain
"""
try:
self.xmltreefile.remove_by_xpath("/seclabel")
except (AttributeError, TypeError):
pass # Element already doesn't exist
self.xmltreefile.write()
def set_controller(self, controller_list):
"""
        Set controllers of the vm. Create new controller elements in the
        xmltreefile from the given controller_list.
"""
# check the type of controller_list and value.
if not isinstance(controller_list, list):
raise xcepts.LibvirtXMLError("controller_element_list should be a"
"instance of list, but not a %s.\n"
% type(controller_list))
devices_element = self.xmltreefile.find("devices")
for contl in controller_list:
element = xml_utils.ElementTree.ElementTree(
file=contl.xml)
devices_element.append(element.getroot())
self.xmltreefile.write()
def del_controller(self, controller_type=None):
"""
        Delete controllers according to controller type
:return: None if deleting all controllers
"""
all_controllers = self.xmltreefile.findall("devices/controller")
del_controllers = []
for controller in all_controllers:
if controller.get("type") != controller_type:
continue
del_controllers.append(controller)
        # no matching controller found in xml.
if del_controllers == []:
logging.debug("Controller %s for this domain does not "
"exist" % controller_type)
for controller in del_controllers:
self.xmltreefile.remove(controller)
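# Illustrative round-trip sketch (hypothetical; not in the original file)
# for the devices/seclabel accessors defined above.
def _vmxmlbase_sketch(vmxml):
    """'vmxml' is assumed to be an initialized VMXMLBase (or subclass)
    instance backed by a parsed domain XML that has a <devices> element."""
    devices = vmxml.get_devices()        # VMXMLDevices instance
    vmxml.set_devices(devices)           # rewrites the <devices> subtree
    vmxml.set_seclabel([{'type': 'dynamic', 'model': 'selinux'}])
    return vmxml.get_seclabel()          # list of attribute dicts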
class VMXML(VMXMLBase):
"""
Higher-level manipulations related to VM's XML or guest/host state
"""
# Must copy these here or there will be descriptor problems
__slots__ = []
def __init__(self, hypervisor_type='kvm', virsh_instance=base.virsh):
"""
Create new VM XML instance
"""
super(VMXML, self).__init__(virsh_instance=virsh_instance)
# Setup some bare-bones XML to build upon
self.xml = u"<domain type='%s'></domain>" % hypervisor_type
@staticmethod # static method (no self) needed b/c calls VMXML.__new__
def new_from_dumpxml(vm_name, options="", virsh_instance=base.virsh):
"""
Return new VMXML instance from virsh dumpxml command
:param vm_name: Name of VM to dumpxml
:param virsh_instance: virsh module or instance to use
:return: New initialized VMXML instance
"""
# TODO: Look up hypervisor_type on incoming XML
vmxml = VMXML(virsh_instance=virsh_instance)
vmxml['xml'] = virsh_instance.dumpxml(vm_name,
extra=options).stdout.strip()
return vmxml
@staticmethod
def new_from_inactive_dumpxml(vm_name, options="", virsh_instance=base.virsh):
"""
Return new VMXML instance of inactive domain from virsh dumpxml command
:param vm_name: Name of VM to dumpxml
:param options: virsh dumpxml command's options
:param virsh_instance: virsh module or instance to use
:return: New initialized VMXML instance
"""
if options.find("--inactive") == -1:
options += " --inactive"
return VMXML.new_from_dumpxml(vm_name, options, virsh_instance)
@staticmethod
def get_device_class(type_name):
"""
Return class that handles type_name devices, or raise exception.
"""
return librarian.get(type_name)
def undefine(self, options=None):
"""Undefine this VM with libvirt retaining XML in instance"""
return self.virsh.remove_domain(self.vm_name, options)
def define(self):
"""Define VM with virsh from this instance"""
result = self.virsh.define(self.xml)
if result.exit_status:
logging.debug("Define %s failed.\n"
"Detail: %s.", self.vm_name, result.stderr)
return False
return True
def sync(self, options=None):
"""Rebuild VM with the config file."""
# If target vm no longer exist, this will raise an exception.
try:
backup = self.new_from_dumpxml(self.vm_name)
except IOError:
logging.debug("Failed to backup %s.", self.vm_name)
backup = None
if not self.undefine(options):
raise xcepts.LibvirtXMLError("Failed to undefine %s."
% self.vm_name)
if not self.define():
if backup:
backup.define()
raise xcepts.LibvirtXMLError("Failed to define %s, from %s."
% (self.vm_name, self.xml))
@staticmethod
def vm_rename(vm, new_name, uuid=None, virsh_instance=base.virsh):
"""
Rename a vm from its XML.
:param vm: VM class type instance
:param new_name: new name of vm
:param uuid: new_vm's uuid, if None libvirt will generate.
:return: a new VM instance
"""
if vm.is_alive():
vm.destroy(gracefully=True)
vmxml = VMXML.new_from_dumpxml(vm_name=vm.name,
virsh_instance=virsh_instance)
backup = vmxml.copy()
# can't do in-place rename, must operate on XML
if not vmxml.undefine():
del vmxml # clean up temporary files
raise xcepts.LibvirtXMLError("Error reported while undefining VM")
# Alter the XML
vmxml.vm_name = new_name
if uuid is None:
# invalidate uuid so libvirt will regenerate
del vmxml.uuid
vm.uuid = None
else:
vmxml.uuid = uuid
vm.uuid = uuid
# Re-define XML to libvirt
logging.debug("Rename %s to %s.", vm.name, new_name)
# error message for failed define
error_msg = "Error reported while defining VM:\n"
try:
if not vmxml.define():
raise xcepts.LibvirtXMLError(error_msg + "%s"
% vmxml.get('xml'))
except error.CmdError, detail:
del vmxml # clean up temporary files
# Allow exceptions thrown here since state will be undefined
backup.define()
raise xcepts.LibvirtXMLError(error_msg + "%s" % detail)
# Keep names uniform
vm.name = new_name
return vm
@staticmethod
def set_pm_suspend(vm_name, mem="yes", disk="yes", virsh_instance=base.virsh):
"""
Add/set pm suspend Support
:params vm_name: Name of defined vm
:params mem: Enable suspend to memory
:params disk: Enable suspend to disk
"""
# Build a instance of class VMPMXML.
pm = VMPMXML()
pm.mem_enabled = mem
pm.disk_enabled = disk
# Set pm to the new instance.
vmxml = VMXML.new_from_dumpxml(vm_name, virsh_instance=virsh_instance)
vmxml.pm = pm
vmxml.sync()
@staticmethod
def set_vm_vcpus(vm_name, value, current=None, virsh_instance=base.virsh):
"""
Convenience method for updating 'vcpu' and 'current' attribute property
of a defined VM
        :param vm_name: Name of defined vm to change vcpu element data
:param value: New data value, None to delete.
:param current: New current value, None will not change current value
"""
vmxml = VMXML.new_from_dumpxml(vm_name, virsh_instance=virsh_instance)
if value is not None:
if current is not None:
try:
current_int = int(current)
except ValueError:
raise xcepts.LibvirtXMLError("Invalid 'current' value '%s'"
% current)
if current_int > value:
raise xcepts.LibvirtXMLError(
"The cpu current value %s is larger than max number %s"
% (current, value))
else:
vmxml['current_vcpu'] = current
vmxml['vcpu'] = value # call accessor method to change XML
else: # value is None
del vmxml.vcpu
vmxml.undefine()
vmxml.define()
# Temporary files for vmxml cleaned up automatically
# when it goes out of scope here.
@staticmethod
def check_cpu_mode(mode):
"""
        Check whether the input cpu mode is valid.
:param mode: the mode of cpu:'host-model'...
"""
# Possible values for the mode attribute are:
# "custom", "host-model", "host-passthrough"
cpu_mode = ["custom", "host-model", "host-passthrough"]
if mode.strip() not in cpu_mode:
raise xcepts.LibvirtXMLError(
"The cpu mode '%s' is invalid!" % mode)
def get_disk_all(self):
"""
Return VM's disk from XML definition, None if not set
"""
disk_nodes = self.xmltreefile.find('devices').findall('disk')
disks = {}
for node in disk_nodes:
dev = node.find('target').get('dev')
disks[dev] = node
return disks
@staticmethod
def get_disk_source(vm_name, option="", virsh_instance=base.virsh):
"""
Get block device of a defined VM's disks.
:param vm_name: Name of defined vm.
:param option: extra option.
"""
vmxml = VMXML.new_from_dumpxml(vm_name, option,
virsh_instance=virsh_instance)
disks = vmxml.get_disk_all()
return disks.values()
@staticmethod
def get_disk_blk(vm_name, virsh_instance=base.virsh):
"""
Get block device of a defined VM's disks.
:param vm_name: Name of defined vm.
"""
vmxml = VMXML.new_from_dumpxml(vm_name, virsh_instance=virsh_instance)
disks = vmxml.get_disk_all()
return disks.keys()
@staticmethod
def get_disk_count(vm_name, virsh_instance=base.virsh):
"""
Get count of VM's disks.
:param vm_name: Name of defined vm.
"""
vmxml = VMXML.new_from_dumpxml(vm_name, virsh_instance=virsh_instance)
disks = vmxml.get_disk_all()
if disks is not None:
return len(disks)
return 0
@staticmethod
def get_disk_attr(vm_name, target, tag, attr, virsh_instance=base.virsh):
"""
Get value of disk tag attribute for a given target dev.
"""
vmxml = VMXML.new_from_dumpxml(vm_name, virsh_instance=virsh_instance)
attr_value = None
try:
disk = vmxml.get_disk_all()[target]
if tag in ["driver", "boot", "address", "alias", "source"]:
attr_value = disk.find(tag).get(attr)
except AttributeError:
logging.error("No %s/%s found.", tag, attr)
return attr_value
@staticmethod
def check_disk_exist(vm_name, disk_src, virsh_instance=base.virsh):
"""
Check if given disk exist in VM.
:param vm_name: Domain name.
        :param disk_src: Domain disk source path or target dev.
:return: True/False
"""
found = False
vmxml = VMXML.new_from_dumpxml(vm_name, virsh_instance=virsh_instance)
if not vmxml.get_disk_count(vm_name, virsh_instance=virsh_instance):
raise xcepts.LibvirtXMLError("No disk in domain %s." % vm_name)
blk_list = vmxml.get_disk_blk(vm_name, virsh_instance=virsh_instance)
disk_list = vmxml.get_disk_source(vm_name, virsh_instance=virsh_instance)
try:
file_list = []
for disk in disk_list:
file_list.append(disk.find('source').get('file'))
except AttributeError:
logging.debug("No 'file' type disk.")
if disk_src in file_list + blk_list:
found = True
return found
@staticmethod
def check_disk_type(vm_name, disk_src, disk_type, virsh_instance=base.virsh):
"""
Check if disk type is correct in VM
:param vm_name: Domain name.
:param disk_src: Domain disk source path
:param disk_type: Domain disk type
:return: True/False
"""
vmxml = VMXML.new_from_dumpxml(vm_name, virsh_instance=virsh_instance)
if not vmxml.get_disk_count(vm_name, virsh_instance=virsh_instance):
raise xcepts.LibvirtXMLError("No disk in domain %s." % vm_name)
disks = vmxml.get_disk_source(vm_name, virsh_instance=virsh_instance)
found = False
try:
for disk in disks:
disk_dev = ""
if disk_type == "file":
disk_dev = disk.find('source').get('file')
elif disk_type == "block":
disk_dev = disk.find('source').get('dev')
if disk_src == disk_dev:
found = True
except AttributeError:
logging.debug("No '%s' type disk." % disk_type)
return found
@staticmethod
def get_disk_serial(vm_name, disk_target, virsh_instance=base.virsh):
"""
Get disk serial in VM
:param vm_name: Domain name.
:param disk_target: Domain disk target
:return: disk serial
"""
vmxml = VMXML.new_from_dumpxml(vm_name, virsh_instance=virsh_instance)
if not vmxml.get_disk_count(vm_name, virsh_instance=virsh_instance):
raise xcepts.LibvirtXMLError("No disk in domain %s." % vm_name)
try:
disk = vmxml.get_disk_all()[disk_target]
except KeyError:
raise xcepts.LibvirtXMLError("Wrong disk target:%s." % disk_target)
serial = ""
try:
serial = disk.find("serial").text
except AttributeError:
logging.debug("No serial assigned.")
return serial
@staticmethod
def get_disk_address(vm_name, disk_target, virsh_instance=base.virsh):
"""
Get disk address in VM
:param vm_name: Domain name.
:param disk_target: Domain disk target
:return: disk address
"""
vmxml = VMXML.new_from_dumpxml(vm_name, virsh_instance=virsh_instance)
if not vmxml.get_disk_count(vm_name, virsh_instance=virsh_instance):
raise xcepts.LibvirtXMLError("No disk in domain %s." % vm_name)
try:
disk = vmxml.get_disk_all()[disk_target]
except KeyError:
raise xcepts.LibvirtXMLError("Wrong disk target:%s." % disk_target)
address_str = ""
try:
disk_bus = disk.find("target").get("bus")
address = disk.find("address")
if disk_bus == "virtio":
add_type = address.get("type")
add_domain = address.get("domain")
add_bus = address.get("bus")
add_slot = address.get("slot")
add_func = address.get("function")
address_str = ("%s:%s.%s.%s.%s"
% (add_type, add_domain, add_bus,
add_slot, add_func))
elif disk_bus in ["ide", "scsi"]:
bus = address.get("bus")
target = address.get("target")
unit = address.get("unit")
address_str = "%s:%s.%s.%s" % (disk_bus, bus, target, unit)
except AttributeError, e:
raise xcepts.LibvirtXMLError("Get wrong attribute: %s" % str(e))
return address_str
@staticmethod
def get_numa_params(vm_name, virsh_instance=base.virsh):
"""
Return VM's numa setting from XML definition
"""
vmxml = VMXML.new_from_dumpxml(vm_name, virsh_instance=virsh_instance)
return vmxml.numa
def get_primary_serial(self):
"""
Get a dict with primary serial features.
"""
xmltreefile = self.__dict_get__('xml')
primary_serial = xmltreefile.find('devices').find('serial')
serial_features = {}
serial_type = primary_serial.get('type')
serial_port = primary_serial.find('target').get('port')
# Support node here for more features
serial_features['serial'] = primary_serial
# Necessary features
serial_features['type'] = serial_type
serial_features['port'] = serial_port
return serial_features
@staticmethod
def set_primary_serial(vm_name, dev_type, port, path=None,
virsh_instance=base.virsh):
"""
Set primary serial's features of vm_name.
:param vm_name: Name of defined vm to set primary serial.
:param dev_type: the type of ``serial:pty,file...``
:param port: the port of serial
:param path: the path of serial, it is not necessary for pty
"""
vmxml = VMXML.new_from_dumpxml(vm_name, virsh_instance=virsh_instance)
xmltreefile = vmxml.__dict_get__('xml')
try:
serial = vmxml.get_primary_serial()['serial']
except AttributeError:
logging.debug("Can not find any serial, now create one.")
# Create serial tree, default is pty
serial = xml_utils.ElementTree.SubElement(
xmltreefile.find('devices'),
'serial', {'type': 'pty'})
# Create elements of serial target, default port is 0
xml_utils.ElementTree.SubElement(serial, 'target', {'port': '0'})
serial.set('type', dev_type)
serial.find('target').set('port', port)
        # path may not exist.
if path is not None:
serial.find('source').set('path', path)
else:
try:
source = serial.find('source')
serial.remove(source)
except AssertionError:
pass # Element not found, already removed.
xmltreefile.write()
vmxml.set_xml(xmltreefile.name)
vmxml.undefine()
vmxml.define()
@staticmethod
def set_agent_channel(vm_name):
"""
Add channel for guest agent running
:param vm_name: Name of defined vm to set agent channel
"""
vmxml = VMXML.new_from_dumpxml(vm_name)
try:
exist = vmxml.__dict_get__('xml').find('devices').findall('channel')
findc = 0
for ec in exist:
if ec.find('target').get('name') == "org.qemu.guest_agent.0":
findc = 1
break
if findc == 0:
raise AttributeError("Cannot find guest agent channel")
except AttributeError:
channel = vmxml.get_device_class('channel')(type_name='unix')
channel.add_source(mode='bind',
path='/var/lib/libvirt/qemu/guest.agent')
channel.add_target(type='virtio',
name='org.qemu.guest_agent.0')
vmxml.devices = vmxml.devices.append(channel)
vmxml.define()
@staticmethod
def remove_agent_channel(vm_name):
"""
Delete channel for guest agent
:param vm_name: Name of defined vm to remove agent channel
"""
vmxml = VMXML.new_from_dumpxml(vm_name)
try:
exist = vmxml.__dict_get__('xml').find('devices').findall('channel')
for ec in exist:
if ec.find('target').get('name') == "org.qemu.guest_agent.0":
channel = vmxml.get_device_class('channel')(type_name='unix')
channel.add_source(mode='bind',
path=ec.find('source').get('path'))
channel.add_target(type='virtio',
name=ec.find('target').get('name'))
vmxml.del_device(channel)
vmxml.define()
except AttributeError:
raise xcepts.LibvirtXMLError("Fail to remove agent channel!")
def get_iface_all(self):
"""
Get a dict with interface's mac and node.
"""
iface_nodes = self.xmltreefile.find('devices').findall('interface')
interfaces = {}
for node in iface_nodes:
mac_addr = node.find('mac').get('address')
interfaces[mac_addr] = node
return interfaces
@staticmethod
def get_iface_by_mac(vm_name, mac, virsh_instance=base.virsh):
"""
Get the interface if mac is matched.
:param vm_name: Name of defined vm.
:param mac: a mac address.
:return: return a dict include main interface's features
"""
vmxml = VMXML.new_from_dumpxml(vm_name, virsh_instance=virsh_instance)
interfaces = vmxml.get_iface_all()
try:
interface = interfaces[mac]
except KeyError:
interface = None
if interface is not None: # matched mac exists.
iface_type = interface.get('type')
source = interface.find('source').get(iface_type)
features = {}
features['type'] = iface_type
features['mac'] = mac
features['source'] = source
return features
else:
return None
@staticmethod
def get_iface_dev(vm_name, virsh_instance=base.virsh):
"""
Return VM's interface device from XML definition, None if not set
"""
vmxml = VMXML.new_from_dumpxml(vm_name, virsh_instance=virsh_instance)
ifaces = vmxml.get_iface_all()
if ifaces:
return ifaces.keys()
return None
@staticmethod
def get_first_mac_by_name(vm_name, virsh_instance=base.virsh):
"""
Convenience method for getting first mac of a defined VM
:param: vm_name: Name of defined vm to get mac
"""
vmxml = VMXML.new_from_dumpxml(vm_name, virsh_instance=virsh_instance)
xmltreefile = vmxml.__dict_get__('xml')
try:
iface = xmltreefile.find('devices').find('interface')
return iface.find('mac').get('address')
except AttributeError:
return None
@staticmethod
def get_iftune_params(vm_name, options="", virsh_instance=base.virsh):
"""
Return VM's interface tuning setting from XML definition
"""
vmxml = VMXML.new_from_dumpxml(vm_name, options=options,
virsh_instance=virsh_instance)
xmltreefile = vmxml.__dict_get__('xml')
iftune_params = {}
bandwidth = None
try:
bandwidth = xmltreefile.find('devices/interface/bandwidth')
try:
iftune_params['inbound'] = bandwidth.find(
'inbound').get('average')
iftune_params['outbound'] = bandwidth.find(
'outbound').get('average')
except AttributeError:
logging.error("Can't find <inbound> or <outbound> element")
except AttributeError:
logging.error("Can't find <bandwidth> element")
return iftune_params
def get_net_all(self):
"""
Return VM's net from XML definition, None if not set
"""
xmltreefile = self.__dict_get__('xml')
net_nodes = xmltreefile.find('devices').findall('interface')
nets = {}
for node in net_nodes:
dev = node.find('target').get('dev')
nets[dev] = node
return nets
# TODO re-visit this method after the libvirt_xml.devices.interface module
# is implemented
@staticmethod
def get_net_dev(vm_name):
"""
Get net device of a defined VM's nets.
:param vm_name: Name of defined vm.
"""
vmxml = VMXML.new_from_dumpxml(vm_name)
nets = vmxml.get_net_all()
if nets is not None:
return nets.keys()
return None
@staticmethod
def set_cpu_mode(vm_name, mode='host-model'):
"""
Set cpu's mode of VM.
:param vm_name: Name of defined vm to set cpu mode.
:param mode: the mode of cpu:'host-model'...
"""
vmxml = VMXML.new_from_dumpxml(vm_name)
vmxml.check_cpu_mode(mode)
xmltreefile = vmxml.__dict_get__('xml')
try:
cpu = xmltreefile.find('/cpu')
logging.debug("Current cpu mode is '%s'!", cpu.get('mode'))
cpu.set('mode', mode)
except AttributeError:
logging.debug("Can not find any cpu, now create one.")
cpu = xml_utils.ElementTree.SubElement(xmltreefile.getroot(),
'cpu', {'mode': mode})
xmltreefile.write()
vmxml.undefine()
vmxml.define()
def add_device(self, value):
"""
Add a device into VMXML.
:param value: instance of device in libvirt_xml/devices/
"""
devices = self.get_devices()
for device in devices:
if device == value:
logging.debug("Device %s is already in VM %s.", value, self)
return
devices.append(value)
self.set_devices(devices)
def del_device(self, value):
"""
Remove a device from VMXML
:param value: instance of device in libvirt_xml/devices/
"""
devices = self.get_devices()
not_found = True
for device in devices:
if device == value:
not_found = False
devices.remove(device)
break
if not_found:
logging.debug("Device %s does not exist in VM %s.", value, self)
return
self.set_devices(devices)
@staticmethod
def add_security_info(vmxml, passwd):
"""
Add passwd for graphic
:param vmxml: instance of VMXML
:param passwd: Password you want to set
"""
devices = vmxml.devices
graphics_index = devices.index(devices.by_device_tag('graphics')[0])
graphics = devices[graphics_index]
graphics.passwd = passwd
vmxml.devices = devices
vmxml.define()
def get_graphics_devices(self, type_name=""):
"""
Get all graphics devices or desired type graphics devices
:param type_name: graphic type, vnc or spice
"""
devices = self.get_devices()
graphics_devices = devices.by_device_tag('graphics')
graphics_list = []
for graphics_device in graphics_devices:
graphics_index = devices.index(graphics_device)
graphics = devices[graphics_index]
if not type_name:
graphics_list.append(graphics)
elif graphics.type_name == type_name:
graphics_list.append(graphics)
return graphics_list
def remove_all_graphics(self):
"""
Remove all graphics devices.
"""
self.xmltreefile.remove_by_xpath('/devices/graphics')
self.xmltreefile.write()
def add_hostdev(self, source_address, mode='subsystem',
hostdev_type='pci',
managed='yes'):
"""
Add a hostdev device to guest.
:param source_address: A dict include slot, function, bus, domain
"""
dev = self.get_device_class('hostdev')()
dev.mode = mode
dev.hostdev_type = hostdev_type
dev.managed = managed
dev.source_address = dev.new_source_address(**source_address)
self.add_device(dev)
@staticmethod
def get_blkio_params(vm_name, options="", virsh_instance=base.virsh):
"""
Return VM's block I/O setting from XML definition
"""
vmxml = VMXML.new_from_dumpxml(vm_name, options=options,
virsh_instance=virsh_instance)
xmltreefile = vmxml.__dict_get__('xml')
blkio_params = {}
try:
blkio = xmltreefile.find('blkiotune')
try:
blkio_params['weight'] = blkio.find('weight').text
except AttributeError:
logging.error("Can't find <weight> element")
except AttributeError:
logging.error("Can't find <blkiotune> element")
if blkio and blkio.find('device'):
blkio_params['device_weights_path'] = \
blkio.find('device').find('path').text
blkio_params['device_weights_weight'] = \
blkio.find('device').find('weight').text
return blkio_params
@staticmethod
def get_blkdevio_params(vm_name, options="", virsh_instance=base.virsh):
"""
Return VM's block I/O tuning setting from XML definition
"""
vmxml = VMXML.new_from_dumpxml(vm_name, options=options,
virsh_instance=virsh_instance)
xmltreefile = vmxml.__dict_get__('xml')
blkdevio_params = {}
iotune = None
blkdevio_list = ['total_bytes_sec', 'read_bytes_sec',
'write_bytes_sec', 'total_iops_sec',
'read_iops_sec', 'write_iops_sec']
# Initialize all of arguments to zero
for k in blkdevio_list:
blkdevio_params[k] = 0
try:
iotune = xmltreefile.find('/devices/disk/iotune')
for k in blkdevio_list:
if iotune.findall(k):
blkdevio_params[k] = int(iotune.find(k).text)
except AttributeError:
xcepts.LibvirtXMLError("Can't find <iotune> element")
return blkdevio_params
@staticmethod
def set_memoryBacking_tag(vm_name, hpgs=True, nosp=False, locked=False,
virsh_instance=base.virsh):
"""
        Let the guest use hugepages.
"""
# Create a new memoryBacking tag
mb_xml = VMMemBackingXML()
mb_xml.nosharepages = nosp
mb_xml.locked = locked
if hpgs:
hpgs = VMHugepagesXML()
mb_xml.hugepages = hpgs
# Set memoryBacking to the new instance.
vmxml = VMXML.new_from_dumpxml(vm_name, virsh_instance=virsh_instance)
vmxml.mb = mb_xml
vmxml.sync()
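# Hypothetical end-to-end sketch (not in the original file) tying together
# the static helpers above; 'vm_name' must name a defined libvirt domain.
def _vmxml_sketch(vm_name):
    vmxml = VMXML.new_from_dumpxml(vm_name)
    VMXML.set_vm_vcpus(vm_name, 4, current=2)   # <vcpu current='2'>4</vcpu>
    VMXML.set_pm_suspend(vm_name, mem='yes', disk='no')
    return vmxml.get_disk_count(vm_name)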
class VMCPUXML(base.LibvirtXMLBase):
"""
Higher-level manipulations related to VM's XML(CPU)
"""
# Must copy these here or there will be descriptor problems
__slots__ = ('model', 'vendor', 'feature_list', 'mode', 'match',
'fallback', 'topology')
def __init__(self, virsh_instance=base.virsh):
"""
Create new VMCPU XML instance
"""
# The set action is for test.
accessors.XMLAttribute(property_name="mode",
libvirtxml=self,
forbidden=[],
parent_xpath='/',
tag_name='cpu',
attribute='mode')
accessors.XMLAttribute(property_name="match",
libvirtxml=self,
forbidden=[],
parent_xpath='/',
tag_name='cpu',
attribute='match')
accessors.XMLElementText(property_name="model",
libvirtxml=self,
forbidden=[],
parent_xpath='/',
tag_name='model')
accessors.XMLElementText(property_name="vendor",
libvirtxml=self,
forbidden=[],
parent_xpath='/',
tag_name='vendor')
accessors.XMLAttribute(property_name="fallback",
libvirtxml=self,
forbidden=[],
parent_xpath='/',
tag_name='model',
attribute='fallback')
accessors.XMLElementDict(property_name="topology",
libvirtxml=self,
forbidden=[],
parent_xpath='/',
tag_name='topology')
# This will skip self.get_feature_list() defined below
accessors.AllForbidden(property_name="feature_list",
libvirtxml=self)
super(VMCPUXML, self).__init__(virsh_instance=virsh_instance)
self.xml = '<cpu/>'
def get_feature_list(self):
"""
Accessor method for feature_list property (in __slots__)
"""
feature_list = []
xmltreefile = self.__dict_get__('xml')
for feature_node in xmltreefile.findall('/feature'):
feature_list.append(feature_node)
return feature_list
def get_feature(self, num):
"""
Get a feature element from feature list by number
:return: Feature element
"""
count = len(self.feature_list)
try:
num = int(num)
return self.feature_list[num]
except (ValueError, TypeError):
raise xcepts.LibvirtXMLError("Invalid feature number %s" % num)
except IndexError:
raise xcepts.LibvirtXMLError("Only %d feature(s)" % count)
def get_feature_name(self, num):
"""
Get feature name
:param num: Number in feature list
:return: Feature name
"""
return self.get_feature(num).get('name')
def get_feature_policy(self, num):
"""
Get feature policy
:param num: Number in feature list
:return: Feature policy
"""
return self.get_feature(num).get('policy')
def remove_feature(self, num):
"""
Remove a feature from xml
:param num: Number in feature list
"""
xmltreefile = self.__dict_get__('xml')
node = xmltreefile.getroot()
node.remove(self.get_feature(num))
@staticmethod
def check_feature_name(value):
"""
        Check whether the feature name is valid.
:param value: Feature name
:return: True if check pass
"""
sys_feature = []
cpu_xml_file = open('/proc/cpuinfo', 'r')
        for line in cpu_xml_file.readlines():
if line.find('flags') != -1:
feature_names = line.split(':')[1].strip()
sys_sub_feature = feature_names.split(' ')
sys_feature = list(set(sys_feature + sys_sub_feature))
cpu_xml_file.close()
return (value in sys_feature)
def set_feature(self, num, name='', policy=''):
"""
Set feature name (and policy) to xml
:param num: Number in feature list
:param name: New feature name
:param policy: New feature policy
"""
feature_set_node = self.get_feature(num)
if name:
feature_set_node.set('name', name)
if policy:
feature_set_node.set('policy', policy)
def add_feature(self, name, policy=''):
"""
Add a feature element to xml
:param name: New feature name
:param policy: New feature policy
"""
xmltreefile = self.__dict_get__('xml')
node = xmltreefile.getroot()
feature_node = {'name': name}
if policy:
feature_node.update({'policy': policy})
xml_utils.ElementTree.SubElement(node, 'feature', feature_node)
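# Small illustrative sketch (hypothetical; not in the original file) of the
# VMCPUXML feature helpers above.
def _vmcpuxml_sketch():
    cpu = VMCPUXML()
    cpu.mode = 'host-model'
    cpu.add_feature('vmx', policy='require')   # appends <feature .../>
    return cpu.get_feature_policy(0)           # -> 'require'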
class VMClockXML(VMXML):
"""
Higher-level manipulations related to VM's XML(Clock)
"""
# Must copy these here or there will be descriptor problems
__slots__ = ('offset', 'timezone', 'adjustment', 'timers')
def __init__(self, virsh_instance=base.virsh, offset="utc"):
"""
Create new VMClock XML instance
"""
# The set action is for test.
accessors.XMLAttribute(property_name="offset",
libvirtxml=self,
forbidden=[],
parent_xpath='/',
tag_name='clock',
attribute='offset')
accessors.XMLAttribute(property_name="timezone",
libvirtxml=self,
forbidden=[],
parent_xpath='/',
tag_name='clock',
attribute='timezone')
accessors.XMLAttribute(property_name="adjustment",
libvirtxml=self,
forbidden=[],
parent_xpath='/',
tag_name='clock',
attribute='adjustment')
accessors.XMLElementList(property_name="timers",
libvirtxml=self,
forbidden=[],
parent_xpath="/clock",
marshal_from=self.marshal_from_timer,
marshal_to=self.marshal_to_timer)
super(VMClockXML, self).__init__(virsh_instance=virsh_instance)
# Set default offset for clock
self.offset = offset
def from_dumpxml(self, vm_name, virsh_instance=base.virsh):
"""Helper to load xml from domain."""
self.xml = VMXML.new_from_dumpxml(vm_name,
virsh_instance=virsh_instance).xml
# Sub-element of clock
class TimerXML(VMXML):
"""Timer element of clock"""
__slots__ = ('name', 'present', 'track', 'tickpolicy', 'frequency',
'mode', 'catchup_threshold', 'catchup_slew',
'catchup_limit')
def __init__(self, virsh_instance=base.virsh, timer_name="tsc"):
"""
Create new TimerXML instance
"""
# The set action is for test.
accessors.XMLAttribute(property_name="name",
libvirtxml=self,
forbidden=[],
parent_xpath='/clock',
tag_name='timer',
attribute='name')
accessors.XMLAttribute(property_name="present",
libvirtxml=self,
forbidden=[],
parent_xpath='/clock',
tag_name='timer',
attribute='present')
accessors.XMLAttribute(property_name="track",
libvirtxml=self,
forbidden=[],
parent_xpath='/clock',
tag_name='timer',
attribute='track')
accessors.XMLAttribute(property_name="tickpolicy",
libvirtxml=self,
forbidden=[],
parent_xpath='/clock',
tag_name='timer',
attribute='tickpolicy')
accessors.XMLAttribute(property_name="frequency",
libvirtxml=self,
forbidden=[],
parent_xpath='/clock',
tag_name='timer',
attribute='frequency')
accessors.XMLAttribute(property_name="mode",
libvirtxml=self,
forbidden=[],
parent_xpath='/clock',
tag_name='timer',
attribute='mode')
accessors.XMLAttribute(property_name="catchup_threshold",
libvirtxml=self,
forbidden=[],
parent_xpath='/clock/timer',
tag_name='catchup',
attribute='threshold')
accessors.XMLAttribute(property_name="catchup_slew",
libvirtxml=self,
forbidden=[],
parent_xpath='/clock/timer',
tag_name='catchup',
attribute='slew')
accessors.XMLAttribute(property_name="catchup_limit",
libvirtxml=self,
forbidden=[],
parent_xpath='/clock/timer',
tag_name='catchup',
attribute='limit')
super(VMClockXML.TimerXML, self).__init__(virsh_instance=virsh_instance)
# name is mandatory for timer
self.name = timer_name
def update(self, attr_dict):
for attr, value in attr_dict.items():
setattr(self, attr, value)
@staticmethod
def marshal_from_timer(item, index, libvirtxml):
"""Convert a TimerXML instance into tag + attributes"""
del index
del libvirtxml
timer = item.xmltreefile.find("clock/timer")
try:
return (timer.tag, dict(timer.items()))
except AttributeError: # Didn't find timer
raise xcepts.LibvirtXMLError("Expected a list of timer "
"instances, not a %s" % str(item))
@staticmethod
def marshal_to_timer(tag, attr_dict, index, libvirtxml):
"""Convert a tag + attributes to a TimerXML instance"""
del index
if tag == 'timer':
newone = VMClockXML.TimerXML(virsh_instance=libvirtxml.virsh)
newone.update(attr_dict)
return newone
else:
return None
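# Hypothetical sketch (not in the original file): building a <clock> element
# with one <timer> child via the marshal helpers above.
def _vmclock_sketch():
    clock = VMClockXML(offset='localtime')
    timer = VMClockXML.TimerXML(timer_name='rtc')
    timer.tickpolicy = 'catchup'
    clock.timers = [timer]       # marshalled into <timer name='rtc' .../>
    return clock.offset          # -> 'localtime'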
class VMCPUTuneXML(base.LibvirtXMLBase):
"""
CPU tuning tag XML class
Elements:
vcpupins: list of dict - vcpu, cpuset
emulatorpin: attribute - cpuset
shares: int
period: int
quota: int
emulator_period: int
emulator_quota: int
"""
__slots__ = ('vcpupins', 'emulatorpin', 'shares', 'period', 'quota',
'emulator_period', 'emulator_quota')
def __init__(self, virsh_instance=base.virsh):
accessors.XMLElementList('vcpupins', self, parent_xpath='/',
marshal_from=self.marshal_from_vcpupins,
marshal_to=self.marshal_to_vcpupins)
accessors.XMLAttribute('emulatorpin', self, parent_xpath='/',
tag_name='emulatorpin', attribute='cpuset')
for slot in self.__all_slots__:
if slot in ('shares', 'period', 'quota', 'emulator_period',
'emulator_quota'):
accessors.XMLElementInt(slot, self, parent_xpath='/',
tag_name=slot)
super(self.__class__, self).__init__(virsh_instance=virsh_instance)
self.xml = '<cputune/>'
@staticmethod
def marshal_from_vcpupins(item, index, libvirtxml):
"""
Convert a dict to vcpupin tag and attributes.
"""
del index
del libvirtxml
if not isinstance(item, dict):
raise xcepts.LibvirtXMLError("Expected a dictionary of host "
"attributes, not a %s"
% str(item))
return ('vcpupin', dict(item))
@staticmethod
def marshal_to_vcpupins(tag, attr_dict, index, libvirtxml):
"""
Convert a vcpupin tag and attributes to a dict.
"""
del index
del libvirtxml
if tag != 'vcpupin':
return None
return dict(attr_dict)
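# Hypothetical sketch (not in the original file): vcpupins round-trips as a
# list of plain dicts thanks to the marshal functions above.
def _cputune_sketch():
    cputune = VMCPUTuneXML()
    cputune.vcpupins = [{'vcpu': '0', 'cpuset': '1'},
                        {'vcpu': '1', 'cpuset': '2-3'}]
    cputune.emulatorpin = '0-1'
    cputune.shares = 2048
    return cputune.vcpupins      # -> the same list of dicts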
class VMOSXML(base.LibvirtXMLBase):
"""
Class to access <os> tag of domain XML.
Elements:
type: text attributes - arch, machine
loader: path
boots: list attributes - dev
bootmenu: attributes - enable
smbios: attributes - mode
bios: attributes - useserial, rebootTimeout
init: text
bootloader: text
bootloader_args: text
kernel: text
initrd: text
cmdline: text
dtb: text
TODO:
initargs: list
"""
__slots__ = ('type', 'arch', 'machine', 'loader', 'boots', 'bootmenu_enable',
'smbios_mode', 'bios_useserial', 'bios_reboot_timeout', 'init',
'bootloader', 'bootloader_args', 'kernel', 'initrd', 'cmdline',
'dtb', 'initargs')
def __init__(self, virsh_instance=base.virsh):
accessors.XMLElementText('type', self, parent_xpath='/',
tag_name='type')
accessors.XMLElementText('loader', self, parent_xpath='/',
tag_name='loader')
accessors.XMLAttribute('arch', self, parent_xpath='/',
tag_name='type', attribute='arch')
accessors.XMLAttribute('machine', self, parent_xpath='/',
tag_name='type', attribute='machine')
accessors.XMLElementList('boots', self, parent_xpath='/',
marshal_from=self.marshal_from_boots,
marshal_to=self.marshal_to_boots)
accessors.XMLAttribute('bootmenu_enable', self, parent_xpath='/',
tag_name='bootmenu', attribute='enable')
accessors.XMLAttribute('smbios_mode', self, parent_xpath='/',
tag_name='smbios', attribute='mode')
accessors.XMLAttribute('bios_useserial', self, parent_xpath='/',
tag_name='bios', attribute='useserial')
accessors.XMLAttribute('bios_reboot_timeout', self, parent_xpath='/',
tag_name='bios', attribute='rebootTimeout')
accessors.XMLElementText('bootloader', self, parent_xpath='/',
tag_name='bootloader')
accessors.XMLElementText('bootloader_args', self, parent_xpath='/',
tag_name='bootloader_args')
accessors.XMLElementText('kernel', self, parent_xpath='/',
tag_name='kernel')
accessors.XMLElementText('initrd', self, parent_xpath='/',
tag_name='initrd')
accessors.XMLElementText('cmdline', self, parent_xpath='/',
tag_name='cmdline')
accessors.XMLElementText('dtb', self, parent_xpath='/',
tag_name='dtb')
accessors.XMLElementText('init', self, parent_xpath='/',
tag_name='init')
super(self.__class__, self).__init__(virsh_instance=virsh_instance)
self.xml = '<os/>'
@staticmethod
def marshal_from_boots(item, index, libvirtxml):
"""
Convert a string to boot tag and attributes.
"""
del index
del libvirtxml
return ('boot', {'dev': item})
@staticmethod
def marshal_to_boots(tag, attr_dict, index, libvirtxml):
"""
Convert a boot tag and attributes to a string.
"""
del index
del libvirtxml
if tag != 'boot':
return None
return attr_dict['dev']
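# Hypothetical sketch (not in the original file): 'boots' marshals a plain
# list of strings to/from <boot dev='...'/> elements.
def _vmos_sketch():
    os_xml = VMOSXML()
    os_xml.type = 'hvm'
    os_xml.arch = 'x86_64'
    os_xml.boots = ['hd', 'cdrom']   # <boot dev='hd'/><boot dev='cdrom'/>
    return os_xml.boots              # -> ['hd', 'cdrom']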
class VMPMXML(base.LibvirtXMLBase):
"""
VM power management tag XML class
Elements:
suspend-to-disk: attribute - enabled
suspend-to-mem: attribute - enabled
"""
__slots__ = ('disk_enabled', 'mem_enabled')
def __init__(self, virsh_instance=base.virsh):
accessors.XMLAttribute('disk_enabled', self, parent_xpath='/',
tag_name='suspend-to-disk', attribute='enabled')
accessors.XMLAttribute('mem_enabled', self, parent_xpath='/',
tag_name='suspend-to-mem', attribute='enabled')
super(self.__class__, self).__init__(virsh_instance=virsh_instance)
self.xml = '<pm/>'
class VMFeaturesXML(base.LibvirtXMLBase):
"""
Class to access <features> tag of domain XML.
Elements:
feature_list list of top level element
hyperv_relaxed: attribute - state
hyperv_vapic: attribute - state
hyperv_spinlocks: attributes - state, retries
kvm_hidden: attribute - state
pvspinlock: attribute - state
"""
__slots__ = ('feature_list', 'hyperv_relaxed_state', 'hyperv_vapic_state',
'hyperv_spinlocks_state', 'hyperv_spinlocks_retries',
'kvm_hidden_state', 'pvspinlock_state')
def __init__(self, virsh_instance=base.virsh):
accessors.XMLAttribute(property_name='hyperv_relaxed_state',
libvirtxml=self,
parent_xpath='/hyperv',
tag_name='relaxed',
attribute='state')
accessors.XMLAttribute(property_name='hyperv_vapic_state',
libvirtxml=self,
parent_xpath='/hyperv',
tag_name='vapic',
attribute='state')
accessors.XMLAttribute(property_name='hyperv_spinlocks_state',
libvirtxml=self,
parent_xpath='/hyperv',
tag_name='spinlocks',
attribute='state')
accessors.XMLAttribute(property_name='hyperv_spinlocks_retries',
libvirtxml=self,
parent_xpath='/hyperv',
tag_name='spinlocks',
attribute='retries')
accessors.XMLAttribute(property_name='kvm_hidden_state',
libvirtxml=self,
parent_xpath='/kvm',
tag_name='hidden',
attribute='state')
accessors.XMLAttribute(property_name='pvspinlock_state',
libvirtxml=self,
parent_xpath='/',
tag_name='pvspinlock',
attribute='state')
accessors.AllForbidden(property_name="feature_list",
libvirtxml=self)
super(self.__class__, self).__init__(virsh_instance=virsh_instance)
self.xml = '<features/>'
def get_feature_list(self):
"""
Return all features(top level elements) in xml
"""
feature_list = []
root = self.__dict_get__('xml').getroot()
for feature in root:
feature_list.append(feature.tag)
return feature_list
def has_feature(self, name):
"""
Return true if the given feature exist in xml
"""
return name in self.get_feature_list()
def add_feature(self, name, attr_name='', attr_value=''):
"""
Add a feature element to xml
:params name: Feature name
"""
if self.has_feature(name):
logging.debug("Feature %s already exist, so remove it", name)
self.remove_feature(name)
root = self.__dict_get__('xml').getroot()
new_attr = {}
if attr_name:
new_attr = {attr_name: attr_value}
xml_utils.ElementTree.SubElement(root, name, new_attr)
def remove_feature(self, name):
"""
Remove a feature element from xml
:params name: Feature name
"""
root = self.__dict_get__('xml').getroot()
remove_feature = root.find(name)
if remove_feature is None:
logging.error("Feature %s doesn't exist", name)
else:
root.remove(remove_feature)
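# Hypothetical sketch (not in the original file) of the feature helpers above.
def _features_sketch():
    features = VMFeaturesXML()
    features.add_feature('pvspinlock', 'state', 'on')
    assert features.has_feature('pvspinlock')
    features.remove_feature('pvspinlock')
    return features.get_feature_list()   # -> []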
# Sub-element of memoryBacking
class VMHugepagesXML(VMXML):
"""hugepages element"""
__slots__ = ('pages',)
def __init__(self, virsh_instance=base.virsh):
accessors.XMLElementList('pages',
libvirtxml=self,
forbidden=[],
parent_xpath="/",
marshal_from=self.marshal_from_page,
marshal_to=self.marshal_to_page)
super(self.__class__, self).__init__(virsh_instance=virsh_instance)
self.xml = '<hugepages/>'
# Sub-element of hugepages
class PageXML(VMXML):
"""Page element of hugepages"""
__slots__ = ('size', 'unit', 'nodeset')
def __init__(self, virsh_instance=base.virsh):
"""
Create new PageXML instance
"""
accessors.XMLAttribute(property_name="size",
libvirtxml=self,
forbidden=[],
parent_xpath='/hugepages',
tag_name='page',
attribute='size')
accessors.XMLAttribute(property_name="unit",
libvirtxml=self,
forbidden=[],
parent_xpath='/hugepages',
tag_name='page',
attribute='unit')
accessors.XMLAttribute(property_name="nodeset",
libvirtxml=self,
forbidden=[],
parent_xpath='/hugepages',
tag_name='page',
attribute='nodeset')
super(VMHugepagesXML.PageXML, self).__init__(virsh_instance=virsh_instance)
def update(self, attr_dict):
for attr, value in attr_dict.items():
setattr(self, attr, value)
@staticmethod
def marshal_from_page(item, index, libvirtxml):
"""Convert a PageXML instance into tag + attributes"""
del index
del libvirtxml
page = item.xmltreefile.find("/hugepages/page")
try:
return (page.tag, dict(page.items()))
except AttributeError: # Didn't find page
raise xcepts.LibvirtXMLError("Expected a list of page "
"instances, not a %s" % str(item))
@staticmethod
def marshal_to_page(tag, attr_dict, index, libvirtxml):
"""Convert a tag + attributes to a PageXML instance"""
del index
if tag == 'page':
newone = VMHugepagesXML.PageXML(virsh_instance=libvirtxml.virsh)
newone.update(attr_dict)
return newone
else:
return None
class VMMemBackingXML(VMXML):
"""
memoryBacking tag XML class
Elements:
hugepages
nosharepages
locked
"""
__slots__ = ('hugepages', 'nosharepages', 'locked')
def __init__(self, virsh_instance=base.virsh):
accessors.XMLElementNest(property_name='hugepages',
libvirtxml=self,
parent_xpath='/',
tag_name='hugepages',
subclass=VMHugepagesXML,
subclass_dargs={
'virsh_instance': virsh_instance})
for slot in ('nosharepages', 'locked'):
accessors.XMLElementBool(slot, self, parent_xpath='/',
tag_name=slot)
super(self.__class__, self).__init__(virsh_instance=virsh_instance)
self.xml = '<memoryBacking/>'
|
chuanchang/tp-libvirt
|
virttest/libvirt_xml/vm_xml.py
|
Python
|
gpl-2.0
| 73,245
|
[
"VisIt"
] |
8b15a95381c0c8e48d1348d097248ac31ec169c48772364d1937e3396e3b83ff
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
************************************
**AdResS** - Object
************************************
The AdResS object is an extension to the integrator. It makes sure that the
integrator also processes the atomistic particles and not only the CG particles.
Hence, this object is of course only used when performing AdResS or H-AdResS
simulations.
In detail the AdResS extension makes sure:
---------------------------------------------
* that the forces on the atomistic particles are also initialized and set to
  zero by Adress::initForces
* that the atomistic particles are also integrated and propagated by
  Adress::integrate1 and Adress::integrate2
Example - how to turn on the AdResS integrator extension:
>>> adress = espresso.integrator.Adress(system)
>>> integrator.addExtension(adress)
"""
from espresso.esutil import cxxinit
from espresso import pmi
from espresso.integrator.Extension import *
from _espresso import integrator_Adress
class AdressLocal(ExtensionLocal, integrator_Adress):
'The (local) AdResS'
def __init__(self, _system, _verletlist, _fixedtuplelist, KTI = False):
        'Local construction of the AdResS extension'
if pmi.workerIsActive():
cxxinit(self, integrator_Adress, _system, _verletlist, _fixedtuplelist, KTI)
if pmi.isController:
class Adress(Extension):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espresso.integrator.AdressLocal' #,
#pmiproperty = [ 'builds' ],
#pmicall = [ 'totalSize', 'exclude', 'addAdrParticles', 'rebuild' ]
)
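# Illustrative setup sketch (hypothetical variable names; not in the original
# file). The local constructor above also requires a verlet list and a fixed
# tuple list, so a fuller version of the docstring example looks like:
#
#   adress = espresso.integrator.Adress(system, verletlist, fixedtuplelist)
#   integrator.addExtension(adress)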
|
BackupTheBerlios/espressopp
|
src/integrator/Adress.py
|
Python
|
gpl-3.0
| 2,463
|
[
"ESPResSo"
] |
e7d4d81c236d932547a973f3b4bb8af2e6720f4544a212bf9b46ea6b4cfcb64a
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.teachers import Teacher
from .build import build
from parlai.utils.io import PathManager
import json
import os
import random
import copy
WELCOME_MESSAGE = "Negotiate with your opponent to decide who gets how many items of each kind. There are three kinds of packages: Food, Water, and Firewood. Each has a quantity of 3. Try hard to get as much value as you can, while still leaving your partner satisfied and with a positive perception about you. If you fail to come to an agreement, both parties get 5 points. Refer to the following preference order and arguments for your negotiation: \n\nFood\nValue: {food_val} points for each package\nArgument: {food_argument}\n\nWater\nValue: {water_val} points for each package\nArgument: {water_argument}\n\nFirewood\nValue: {firewood_val} points for each package\nArgument: {firewood_argument}\n"
def get_welcome_values(part_info):
value2points = {'High': 5, 'Medium': 4, 'Low': 3}
issue2points = {v: value2points[k] for k, v in part_info['value2issue'].items()}
issue2reason = {
v: part_info['value2reason'][k] for k, v in part_info['value2issue'].items()
}
welcome_values = {}
for issue in ['Food', 'Water', 'Firewood']:
welcome_values[issue.lower() + '_val'] = issue2points[issue]
welcome_values[issue.lower() + '_argument'] = issue2reason[issue]
return welcome_values
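# For example (hypothetical participant info): with
#   part_info['value2issue'] == {'High': 'Food', 'Medium': 'Water',
#                                'Low': 'Firewood'}
# get_welcome_values above yields food_val=5, water_val=4, firewood_val=3,
# and the matching value2reason strings become food_argument, water_argument,
# and firewood_argument.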
def get_utterance_text(utterance):
if utterance['text'] == '<DUMMY>':
return ''
# the utterance is not a dummy one at this point
if utterance['text'] != 'Submit-Deal':
# simply return it
return utterance['text']
# if it is a Submit-Deal -> attach task_data
txt = f"{utterance['text']} What I get- Food:{utterance['task_data']['issue2youget']['Food']}, Water: {utterance['task_data']['issue2youget']['Water']}, Firewood: {utterance['task_data']['issue2youget']['Firewood']}; What you get- Food:{utterance['task_data']['issue2theyget']['Food']}, Water: {utterance['task_data']['issue2theyget']['Water']}, Firewood: {utterance['task_data']['issue2theyget']['Firewood']}"
return txt
class CasinoTeacher(Teacher):
"""
A negotiation teacher that loads the CaSiNo data from
https://github.com/kushalchawla/CaSiNo.
Each dialogue is converted into two datapoints, one from the perspective of each
participant.
"""
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
self.datatype = opt['datatype'].split(':')[0]
self.datatype_ = opt['datatype']
self.random = self.datatype_ == 'train'
build(opt)
filename = self.datatype
data_path = os.path.join(
opt['datapath'], 'casino', 'casino_' + filename + '.json'
)
if shared and 'data' in shared:
self.episodes = shared['episodes']
else:
self._setup_data(data_path)
print(f"Total episodes: {self.num_episodes()}")
# for ordered data in batch mode (especially, for validation and
# testing), each teacher in the batch gets a start index and a step
# size so they all process disparate sets of the data
self.step_size = opt.get('batchsize', 1)
self.data_offset = opt.get('batchindex', 0)
self.reset()
def _setup_data(self, data_path):
print('loading: ' + data_path)
with PathManager.open(data_path) as data_file:
dialogues = json.load(data_file)
episodes = []
for dialogue in dialogues:
# divide the dialogue into two perspectives, one for each participant
episode = copy.deepcopy(dialogue)
            # id of the agent whose perspective will be used in this dialog
            episode['perspective'] = 'mturk_agent_1'
episodes.append(episode)
episode = copy.deepcopy(dialogue)
            # id of the agent whose perspective will be used in this dialog
            episode['perspective'] = 'mturk_agent_2'
episodes.append(episode)
self.episodes = episodes
        # Add dummy data to ensure that every chat begins with a teacher
        # utterance (THEM) and ends at the agent's utterance (YOU). This is
        # done for uniformity while parsing the data; it makes the code
        # simpler and easier to read than the DealNoDeal counterpart.
for ix, episode in enumerate(self.episodes):
chat_logs = episode['chat_logs']
perspective = episode['perspective']
if chat_logs[0]['id'] == perspective:
# chat must start with a teacher; add dummy utterance
dummy_utterance = {
'text': '<DUMMY>',
'task_data': {},
'id': 'mturk_agent_1'
if perspective == 'mturk_agent_2'
else 'mturk_agent_2',
}
chat_logs = [dummy_utterance] + chat_logs
if chat_logs[-1]['id'] != perspective:
# chat must end with the agent; add dummy utterance
dummy_utterance = {
'text': '<DUMMY>',
'task_data': {},
'id': 'mturk_agent_1'
if perspective == 'mturk_agent_1'
else 'mturk_agent_2',
}
chat_logs = chat_logs + [dummy_utterance]
self.episodes[ix]['chat_logs'] = chat_logs
def reset(self):
super().reset()
self.episode_idx = self.data_offset - self.step_size
self.dialogue_idx = None
self.perspective = None
self.dialogue = None
self.output = None
self.expected_response = None
self.epochDone = False
def num_examples(self):
"""
Lets return the the number of responses that an agent would generate in one
epoch + 1 count for every output.
This will include special utterances for submit-deal, accept-deal, and reject-
deal.
"""
num_exs = 0
for episode in self.episodes:
for utt in episode['chat_logs']:
if utt['text'] != '<DUMMY>':
# skip the dummy utterances
num_exs += 1
return (num_exs // 2) + len(
self.episodes
) # since each dialogue was converted into 2 perspectives, one for each participant: see _setup_data
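    # Worked example (hypothetical numbers): a chat with 12 real utterances is
    # mirrored into two episodes, so 24 non-dummy utterances are counted above;
    # 24 // 2 = 12 label turns, plus one final output turn per episode (2),
    # gives num_examples() == 14.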
def num_episodes(self):
return len(self.episodes)
def share(self):
shared = super().share()
shared['episodes'] = self.episodes
return shared
def observe(self, observation):
"""
Process observation for metrics.
"""
if self.expected_response is not None:
self.metrics.evaluate_response(observation, self.expected_response)
self.expected_response = None
return observation
def act(self):
if self.dialogue_idx is not None:
# continue existing conversation
return self._continue_dialogue()
elif self.random:
# if random, then select the next random example
self.episode_idx = random.randrange(len(self.episodes))
return self._start_dialogue()
elif self.episode_idx + self.step_size >= len(self.episodes):
# end of examples
self.epochDone = True
return {'episode_done': True}
else:
# get next non-random example
self.episode_idx = (self.episode_idx + self.step_size) % len(self.episodes)
return self._start_dialogue()
def _start_dialogue(self):
"""
Starting a dialogue should be the same as continuing a dialogue but with just
one difference: it will attach the welcome note to the teacher's utterance.
        Each dialogue has two possible agents: mturk_agent_1 or mturk_agent_2. One of
        them will act as the perspective for this episode.
"""
episode = self.episodes[self.episode_idx]
self.perspective = episode['perspective']
self.other_id = (
'mturk_agent_1' if self.perspective == 'mturk_agent_2' else 'mturk_agent_2'
)
part_info = episode['participant_info'][self.perspective]
part_info_other = episode['participant_info'][self.other_id]
welcome_values = get_welcome_values(part_info)
welcome = WELCOME_MESSAGE.format(
food_val=welcome_values['food_val'],
water_val=welcome_values['water_val'],
firewood_val=welcome_values['firewood_val'],
food_argument=welcome_values['food_argument'],
water_argument=welcome_values['water_argument'],
firewood_argument=welcome_values['firewood_argument'],
)
self.dialogue = episode['chat_logs']
self.output = {
'your_points_scored': part_info['outcomes']['points_scored'],
'how_satisfied_is_your_partner': part_info_other['outcomes'][
'satisfaction'
],
'how_much_does_your_partner_like_you': part_info_other['outcomes'][
'opponent_likeness'
],
}
self.dialogue_idx = -1
action = self._continue_dialogue()
if action['text']:
# This is non-empty; meaning the teacher starts the conversation and has something to say.
action['text'] = f"{welcome}\n{action['text']}"
else:
            # text is empty: the teacher did not start the conversation; the
            # empty string comes from the dummy teacher utterance added in
            # _setup_data
action['text'] = welcome
action['meta-info'] = welcome_values
return action
def _continue_dialogue(self):
"""
Return an action object.
From the perspective of a specific agent's id, all utterances authored by the
other agent are coming from the teacher as the text of the action object, and
all utterances authored by this agent appear as the labels.
"""
action = {}
# Fill in teacher's message (THEM)
self.dialogue_idx += 1
if self.dialogue_idx < len(self.dialogue):
# this is a usual dialogue teacher-agent pair; return the teacher's utterance as action text.
utterance = self.dialogue[self.dialogue_idx]
assert utterance['id'] != self.perspective
utterance_text = get_utterance_text(
utterance
) # will take care of special submit-deal utterance and dummy utterances
action['text'] = utterance_text
if action['text'] == 'Reject-Deal':
                # merge with the next dialogue_idx since that is from the same participant, while this code assumes alternating utterances.
self.dialogue_idx += 1 # we know that this will be valid
utterance = self.dialogue[self.dialogue_idx]
assert utterance['id'] != self.perspective
utterance_text = get_utterance_text(
utterance
) # will take care of special submit-deal utterance and dummy utterances
action['text'] = action['text'] + ' ' + utterance_text
else:
# the primary dialogue is over; now is the time to return the output of this dialogue
action[
'text'
] = f"Your points scored: {self.output['your_points_scored']}, How satisfied is your partner: {self.output['how_satisfied_is_your_partner']}, How much does your partner like you: {self.output['how_much_does_your_partner_like_you']}"
# Fill in learner's response (YOU)
self.dialogue_idx += 1
self.expected_response = None
if self.dialogue_idx < len(self.dialogue):
# usual dialogue going on; return the agent's utterance as the labels
utterance = self.dialogue[self.dialogue_idx]
assert (
utterance['id'] == self.perspective
), f"id: {utterance['id']}, perspect: {self.perspective}"
utterance_text1 = get_utterance_text(
utterance
) # will take care of special submit-deal utterance and dummy utterances
utterance_text2 = ''
if utterance_text1 == 'Reject-Deal':
                # merge with the next dialogue_idx since that is from the same participant, while this code assumes alternating utterances.
self.dialogue_idx += 1 # we know that this will be valid
utterance = self.dialogue[self.dialogue_idx]
assert utterance['id'] == self.perspective
utterance_text2 = get_utterance_text(
utterance
) # will take care of special submit-deal utterance and dummy utterances
self.expected_response = (
[utterance_text1 + ' ' + utterance_text2]
if (utterance_text1 + ' ' + utterance_text2).strip()
else None
)
else:
# no label required when the primary dialogue is complete
pass
if self.expected_response:
            # since labels is automatically renamed to eval_labels for valid/test,
            # doing just this takes care of everything and ensures that labels can
            # at least be accessed regardless of the datatype.
action['labels'] = self.expected_response
if self.dialogue_idx >= len(self.dialogue):
self.dialogue_idx = None
action['episode_done'] = True
else:
action['episode_done'] = False
return action
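    # Shape of a typical mid-dialogue action returned above (hypothetical
    # strings):
    #   {'text': 'Hello! How should we split the supplies?',
    #    'labels': ['I could really use extra firewood because ...'],
    #    'episode_done': False}
    # On the final turn, 'text' carries the outcome summary and no labels are
    # attached.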
class DefaultTeacher(CasinoTeacher):
pass
|
facebookresearch/ParlAI
|
parlai/tasks/casino/agents.py
|
Python
|
mit
| 14,045
|
[
"CASINO"
] |
138bbc0f9f2afa792248f26eb6e41a0b312358cf7f5ae16f7eb8404f9186ae4a
|
import numpy as np
import pdb
from scipy.interpolate import interp1d
from scipy.stats import pearsonr
from scipy.stats import norm
import scipy.signal as signal
from matplotlib import gridspec
import matplotlib
matplotlib.use('Qt4Agg')
import matplotlib.pyplot as plt
from matplotlib.widgets import RadioButtons, Button, CheckButtons
import sys
from types import *
class Estimateline:
'''Class to manually estimate where lines are located'''
def __init__(self,pspec,ax5,uline):
        print 'If redshift calibration appears correct, hit "Accept & Close". '\
              'Otherwise, "right click" approx. where the '+uline+' line is in the plotted spectrum. '\
              'The program will re-correlate based on this guess.'
        self.ax5 = ax5
        self.shift_is_held = False
        self.cid3 = pspec.figure.canvas.mpl_connect('button_press_event',self.onclick)
def on_key_press(self,event):
if event.key == 'shift':
self.shift_is_held = True
def on_key_release(self, event):
if event.key == 'shift':
self.shift_is_held = False
def onclick(self,event):
if event.inaxes == self.ax5:
if event.button == 3:
                print 'xdata=%f, ydata=%f'%(event.xdata, event.ydata)
self.lam = event.xdata
plt.close()
'''
if event.button == 1:
#if self.shift_is_held:
# print 'xdata=%f, ydata%f'%(event.xdata, event.ydata)
# self.lam = event.xdata
# plt.close()
#else:
plt.close()
'''
else: return
class z_est:
def __init__(self,lower_w=3900.0,upper_w=5500.0,lower_z=0.01,upper_z=0.35,z_res=3.0e-5,skip_initial_priors=False):
'''
Initialize redshift estimate parameters
'''
#preconditions
assert lower_w and upper_w, "wavelength bounds must have values"
assert lower_z and upper_z, "redshift bounds must have values"
assert (type(lower_w) == IntType or type(lower_w) == FloatType) and (type(upper_w) == IntType or \
type(upper_w) == FloatType), "wavelength bounds must be integers or floats"
        assert (type(lower_z) == IntType or type(lower_z) == FloatType) and (type(upper_z) == IntType or \
            type(upper_z) == FloatType), "redshift bounds must be integers or floats"
assert lower_w < upper_w, "lower_w must be < upper_w"
assert lower_z < upper_z, "lower_z must be < upper_z"
#set class attributes
self.lower_w = lower_w
self.upper_w = upper_w
self.lower_z = lower_z
self.upper_z = upper_z
self.z_res = z_res
#create redshift array and initialize correlation value array
self.ztest = np.arange(self.lower_z,self.upper_z,self.z_res)
self.corr_val_i = np.zeros(self.ztest.size)
#set redshift prior flag
if skip_initial_priors:
self.est_pre_z = '3'
self.uline_n = 'HK'
self.uline = 3950.0
self.z_prior_width = 0.06
else:
self.est_pre_z = raw_input('(1) Use a known prior [Examples: median of known redshifts. Galaxy photoz measurements] \n'\
'(2) View spectrum and specify a redshift prior \n'\
'(3) No prior\n')
#catch and correct false entry
_est_enter = False
self.uline_n = raw_input('What is the name of a spectral line you wish to use to identify redshift priors? '\
'[Default: HK]: ')
if not self.uline_n:
self.uline_n = 'HK'
self.uline = raw_input('Please list the approx. rest wavelength (in angstroms) of that line you seek to identify in your spectra '\
'[Default: HK lines are at about 3950]: ')
if self.uline:
self.uline = np.float(self.uline)
else:
self.uline = 3950.0
while not _est_enter:
if self.est_pre_z == '1':
self.z_prior_width = 0.06
print 'redshift prior width has been set to',self.z_prior_width
_est_enter = True
elif self.est_pre_z == '2':
self.z_prior_width = 0.06
print 'redshift prior width has been set to',self.z_prior_width
_est_enter = True
elif self.est_pre_z == '3':
self.z_prior_width = 0.06
_est_enter = True
else:
self.est_pre_z = raw_input('Incorrect entry: Please enter either (1), (2), or (3).')
#remind user to set the correct values in next step
if self.est_pre_z == '1':
print 'Make sure to set the gal_prior argument to the value of the known redshift prior: '\
'[Example: z_est.redshift_estimate(gal_prior=0.1)]'
#postconditions
assert self.est_pre_z, "Must define redshift prior flag"
assert self.est_pre_z == '1' or self.est_pre_z == '2' or self.est_pre_z == '3', \
"Incorrect string value for prior"
def redshift_estimate(self,early_type_wave,early_type_flux,wave,Flux_science,plotlines=None,template_id='Galaxy',gal_prior=None):
'''
estimate redshift for object
'''
#manage redshift prior
self.gal_prior = gal_prior
self.template_number = template_id
self.plotlines = plotlines
#continuum subtract
Flux_sc = Flux_science - signal.medfilt(Flux_science,171)
early_type_flux_sc = early_type_flux - signal.medfilt(early_type_flux,171)
#handle single redshift prior flag
if self.est_pre_z == '1':
if self.gal_prior:
self.pre_z_est = self.gal_prior
else:
nospec = raw_input('You said you are either using a spectroscopic or photometric redshift prior. '\
'You need to specify a prior value! Either enter a number in now or type (q) to exit')
if nospec == 'q':
sys.exit()
elif not nospec:
sys.exit()
else:
self.gal_prior = np.float(nospec)
self.pre_z_est = self.gal_prior
#handle user prior flag
if self.est_pre_z == '2':
print 'Take a look at the plotted galaxy spectrum and note, approximately, at what wavelength do you see the '+self.uline_n+' line. '\
'Then close the plot and enter that wavelength in angstroms.'
plt.plot(wave,Flux_science)
plt.xlim(self.lower_w,self.upper_w)
plt.show()
line_init = raw_input(self.uline_n+' approx. wavelength (A): ')
self.pre_z_est = np.float(line_init)/self.uline - 1
#handle no prior flag
if self.est_pre_z == '3':
self.pre_z_est = None
redshift_est,cor,ztest,corr_val = self._cross_cor(self.pre_z_est,self.z_prior_width,early_type_wave,early_type_flux_sc,wave,Flux_sc)
self.qualityval = 1
self.first_pass = True
self.skip_spec_flag = False
self._GUI_display(redshift_est,ztest,corr_val,wave,Flux_science)
#self.line_est = Estimateline(self.pspec,ax)
#plt.show()
try:
self.pre_lam_est = self.line_est.lam
            self.pre_z_est = self.pre_lam_est/self.uline - 1.0
self.first_pass = False
redshift_est,cor,ztest,corr_val = self._cross_cor(self.pre_z_est,self.z_prior_width,early_type_wave,early_type_flux_sc,wave,Flux_sc)
print 'redshift est:',redshift_est
self._GUI_display(redshift_est,ztest,corr_val,wave,Flux_science)
redshift_est = self.spectra2.finalz
except AttributeError:
pass
print 'zpy redshift estimate',redshift_est
return redshift_est,cor,ztest,corr_val,self.qualityval
def _cross_cor(self,z_est,unc,early_type_wave,early_type_flux,wave,Flux_sc):
'''
This function cross-correlates a continuum subtracted template spectrum with a continuum subtracted observed spectrum.
It then returns an estimate of the redshift, the correlation value at that redshift, the array of redshifts tested,
and the unnormalized correlation value.
'''
#loop over each possible redshift to compute correlation values
for i in range(self.ztest.size):
z = self.ztest[i]
#redshift the template wavelengths
wshift = early_type_wave*(1+z)
#identify the wavelength diff between the lower wave limit and the redshifted template spectrum
wavediff = np.min(wshift - self.lower_w)
#if the limit is above the minimum wavelength of the redshifted template spectrum...
if wavediff < 0:
wave_range = wave[np.where((wave<self.upper_w)&(wave>self.lower_w))]
Flux_range = Flux_sc[np.where((wave<self.upper_w)&(wave>self.lower_w))]
#if the limit is below the minimum wavelength of the redshifted template spectrum...
else:
wave_range = wave[np.where((wave<self.upper_w+wavediff)&(wave>self.lower_w+wavediff))]
Flux_range = Flux_sc[np.where((wave<self.upper_w+wavediff)&(wave>self.lower_w+wavediff))]
#interpolate the redshifted template spectrum and estimate the flux at the observed spectrum wavelengths
inter = interp1d(wshift,early_type_flux)
et_flux_range = inter(wave_range)
#calculate the pearson r correlation value between the observed and template flux
self.corr_val_i[i] = pearsonr(et_flux_range,Flux_range)[0]
#normalize the correlation values as a function of redshift
corr_val = (self.corr_val_i[np.isfinite(self.corr_val_i)]+1)/np.trapz((self.corr_val_i[np.isfinite(self.corr_val_i)]+1),self.ztest[np.isfinite(self.corr_val_i)])
self.ztest = self.ztest[np.isfinite(self.corr_val_i)]
#multiply in prior to likelihood if specified
self.corr_prior = np.zeros(self.ztest.size)
if z_est:
rv = norm(z_est,unc)
corr_val = corr_val * rv.pdf(self.ztest)
self.corr_prior = rv.pdf(self.ztest)
#make redshift estimate
redshift_est = (self.ztest[np.where((self.ztest>self.lower_z)&(self.ztest<self.upper_z))])[np.where(corr_val[np.where((self.ztest>self.lower_z)&(self.ztest<self.upper_z))] == np.max(corr_val[np.where((self.ztest>self.lower_z)&(self.ztest<self.upper_z))]))]
#save correlation value at maximum redshift likelihood
cor = (self.corr_val_i[np.where((self.ztest>self.lower_z)&(self.ztest<self.upper_z))])[np.where(corr_val[np.where((self.ztest>self.lower_z)&(self.ztest<self.upper_z))] == np.max(corr_val[np.where((self.ztest>self.lower_z)&(self.ztest<self.upper_z))]))]
return redshift_est[0], cor, self.ztest,corr_val
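    # In effect, the loop above builds a likelihood from the Pearson r at each
    # trial redshift, normalizes (r(z)+1) so it integrates to 1 over ztest,
    # and, if a prior is given, multiplies in a Gaussian N(z_est, unc) before
    # taking the argmax within [lower_z, upper_z].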
def _GUI_display(self,redshift_est,ztest,corr_val,wave,flux_sc):
'''Display the spectrum and reference lines.'''
self.fig = plt.figure(figsize=(10, 8))
gs = gridspec.GridSpec(2, 1, height_ratios=[2, 1])
ax2 = plt.subplot(gs[0])
ax = plt.subplot(gs[1])
#maximize window
figManager = plt.get_current_fig_manager()
figManager.window.showMaximized()
plt.subplots_adjust(top=0.96,bottom=0.1,left=0.04,right=0.80)
ax.plot(ztest,corr_val,'b')
pspec_corr = ax.axvline(redshift_est,color='k',ls='--')
ax.fill_between(self.ztest,np.zeros(self.ztest.size),self.corr_prior,facecolor='grey',alpha=0.6)
ax.set_xlabel('Redshift')
ax.set_ylabel('Correlation')
self.pspec, = ax2.plot(wave,flux_sc)
ax2.set_ylim(np.min(flux_sc),np.max(flux_sc))
ax2.set_xlim(wave[0],wave[-1])
#plot lines
vlin_pspecs = []
        if self.plotlines is None:
            self.plotlines = {'HKlines':[3968.5,3933.7],'emission_lines':[3725.0,4959.0,5007.0],'absorption_lines':[4102.9,4304.0,4862.0,5175.0],'sky_lines':[]}
try:
for vlin in self.plotlines['HKlines']:
vlin_pspecs.append(ax2.axvline(vlin*(1+redshift_est),ls='--',alpha=0.7,c='red'))
except KeyError: pass
try:
for vlin in self.plotlines['emission_lines']:
vlin_pspecs.append(ax2.axvline(vlin*(1+redshift_est),ls='--',alpha=0.7,c='blue'))
except KeyError: pass
try:
for vlin in self.plotlines['absorption_lines']:
vlin_pspecs.append(ax2.axvline(vlin*(1+redshift_est),ls='--',alpha=0.7,c='orange'))
except KeyError: pass
try:
for vlin in self.plotlines['sky_lines']:
vlin_pspecs.append(ax2.axvline(vlin*(1+redshift_est),ls='--',alpha=0.7,c='grey'))
except KeyError: pass
if self.first_pass:
self.line_est = Estimateline(self.pspec,ax2,self.uline_n)
self.fig.text(0.83, 0.9, 'Template {0}'.format(self.template_number), bbox=dict(facecolor='white', alpha=1.),fontsize=18)
rax = plt.axes([0.83, 0.43, 0.15, 0.22])
if self.qualityval == 1:
radio = RadioButtons(rax, ('1 - No Clue ','2 - Slight\n Chance', '3 - Maybe', '4 - Probably', '5 - Clear'))
else:
radio = RadioButtons(rax, ('1 - No Clue ','2 - Slight\n Chance', '3 - Maybe', '4 - Probably', '5 - Clear'),active=1)
def qualfunc(label):
if label == '5 - Clear':
self.qualityval = 5
elif label == '4 - Probably':
self.qualityval = 4
elif label == '3 - Maybe':
self.qualityval = 3
elif label == '2 - Slight\n Chance':
self.qualityval = 2
else:
self.qualityval = 1
radio.on_clicked(qualfunc)
closeax = plt.axes([0.83, 0.3, 0.15, 0.1])
button = Button(closeax, 'Accept & Close', hovercolor='0.975')
def closeplot(event):
plt.close()
button.on_clicked(closeplot)
skip_spec_ax = plt.axes([0.83, 0.94, 0.15, 0.04])
skip_button = Button(skip_spec_ax, 'skip spectra', hovercolor='0.975')
def skip_spec(event):
plt.close()
self.qualityval = 0
self.skip_spec_flag = True
skip_button.on_clicked(skip_spec)
ax2.set_xlim(self.lower_w,self.upper_w)
ax2.set_xlabel('Wavelength (A)')
ax2.set_ylabel('Counts')
plt.draw()
if not self.first_pass:
self.spectra2 = DragSpectra(vlin_pspecs,pspec_corr,redshift_est,ax2)
self.fig.canvas.mpl_connect('motion_notify_event',self.spectra2.on_motion)
self.fig.canvas.mpl_connect('button_press_event',self.spectra2.on_press)
self.fig.canvas.mpl_connect('button_release_event',self.spectra2.on_release)
plt.show()
class DragSpectra:
    '''Class to drag the spectra back and forth to match lines of interest'''
def __init__(self,vlin_spectra,corr_spec,redshift_estimate,ax5):
self.ax5 = ax5
self.corr_spec = corr_spec
self.yzs = self.corr_spec.get_data()[1]
print 'begin shift'
self.vlin_spectra = vlin_spectra
self.vline_ys = vlin_spectra[0].get_data()[1]
self.pressed = False
self.finalz = redshift_estimate
#figure.canvas.mpl_connect('motion_notify_event',self.on_motion)
#figure.canvas.mpl_connect('button_press_event',self.on_press)
#figure.canvas.mpl_connect('button_release_event',self.on_release)
def on_motion(self,evt):
if self.pressed:
#dx = evt.xdata - self.mouse_x
#print "%d %d" % (evt.xdata,self.mouse_x)
newz = ((evt.xdata/self.mouse_x)*(1.+self.z_on_press))-1. #((1. + (dx/self.mouse_x))*(1.+self.z0))-1.
newxs = self.vline_lams*(evt.xdata/self.mouse_x) # equivalent to spec_x*((1+newz)/(1+z0))
for i in np.arange(len(self.vlin_spectra)):
self.vlin_spectra[i].set_data([newxs[i], newxs[i]], self.vline_ys)
self.corr_spec.set_data([newz, newz], self.yzs)
plt.draw()
def on_press(self,evt):
if evt.inaxes == self.ax5:
self.mouse_x = evt.xdata
self.z_on_press = self.corr_spec.get_data()[0][0]
self.vline_lams = np.array([self.vlin_spectra[x].get_data()[0][0] for x in np.arange(len(self.vlin_spectra))])
self.pressed = True
else: return
def on_release(self,evt):
if evt.inaxes == self.ax5:
self.pressed = False
try:
self.finalz = self.corr_spec.get_data()[0][0]
except AttributeError:
self.finalz = self.finalz
else: return
if __name__ == '__main__':
R = z_est()
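    # Usage sketch (hypothetical spectra; wavelengths in Angstroms):
    #   template_wave, template_flux = ...   # early-type template spectrum
    #   wave, flux = ...                     # observed science spectrum
    #   z, cor, ztest, corr_val, quality = R.redshift_estimate(
    #       template_wave, template_flux, wave, flux)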
|
giffordw/zpy
|
zpy/__init__.py
|
Python
|
mit
| 17,230
|
[
"Galaxy"
] |
a5c6dad6520436069e8026796ada24ca507c5781e113594f6382c1bf3cd60690
|
# Copyright 1999 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
from __future__ import print_function
import os.path
import unittest
import shutil
from Bio._py3k import StringIO
import tempfile
from Bio import File
data = """This
is
a multi-line
file"""
# UndoHandle
h = File.UndoHandle(StringIO(data))
print(h.readline()) # 'This'
print(h.peekline()) # 'is'
print(h.readline()) # 'is'
h.saveline("saved")
print(h.peekline()) # 'saved'
h.saveline("another")
print(h.readline()) # 'another'
print(h.readline()) # 'saved'
# Test readlines after saveline
h.saveline("saved again")
lines = h.readlines()
print(repr(lines[0])) # 'saved again'
print(repr(lines[1])) # 'a multi-line'
print(repr(lines[2])) # 'file'
# should be empty now
print(repr(h.readline())) # ''
h.saveline("save after empty")
print(h.readline()) # 'save after empty'
print(repr(h.readline())) # ''
# test read method
h = File.UndoHandle(StringIO("some text"))
h.saveline("more text")
print(h.read()) # 'more textsome text'
class AsHandleTestCase(unittest.TestCase):
def setUp(self):
# Create a directory to work in
self.temp_dir = tempfile.mkdtemp(prefix='biopython-test')
def tearDown(self):
shutil.rmtree(self.temp_dir)
def _path(self, *args):
return os.path.join(self.temp_dir, *args)
def test_handle(self):
"Test as_handle with a file-like object argument"
p = self._path('test_file.fasta')
with open(p, 'wb') as fp:
with File.as_handle(fp) as handle:
self.assertEqual(fp, handle, "as_handle should "
"return argument when given a file-like object")
self.assertFalse(handle.closed)
self.assertFalse(handle.closed,
"Exiting as_handle given a file-like object should not "
"close the file")
def test_path(self):
"Test as_handle with a path argument"
p = self._path('test_file.fasta')
mode = 'wb'
with File.as_handle(p, mode=mode) as handle:
self.assertEqual(p, handle.name)
self.assertEqual(mode, handle.mode)
self.assertFalse(handle.closed)
self.assertTrue(handle.closed)
def test_stringio(self):
s = StringIO()
with File.as_handle(s) as handle:
self.assertEqual(s, handle)
|
updownlife/multipleK
|
dependencies/biopython-1.65/Tests/test_File.py
|
Python
|
gpl-2.0
| 2,588
|
[
"Biopython"
] |
e125af4b2929ffa2224e06f13fa4c14cda25d7b16cb1af04ab1348ababbfabe1
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.compiled
~~~~~~~~~~~~~~~~~~~~~~~~
Just export lexer classes previously contained in this module.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexers.jvm import JavaLexer, ScalaLexer
from pygments.lexers.c_cpp import CLexer, CppLexer
from pygments.lexers.d import DLexer
from pygments.lexers.objective import ObjectiveCLexer, \
ObjectiveCppLexer, LogosLexer
from pygments.lexers.go import GoLexer
from pygments.lexers.rust import RustLexer
from pygments.lexers.c_like import ECLexer, ValaLexer, CudaLexer
from pygments.lexers.pascal import DelphiLexer, Modula2Lexer, AdaLexer
from pygments.lexers.business import CobolLexer, CobolFreeformatLexer
from pygments.lexers.fortran import FortranLexer
from pygments.lexers.prolog import PrologLexer
from pygments.lexers.python import CythonLexer
from pygments.lexers.graphics import GLShaderLexer
from pygments.lexers.ml import OcamlLexer
from pygments.lexers.basic import BlitzBasicLexer, BlitzMaxLexer, MonkeyLexer
from pygments.lexers.dylan import DylanLexer, DylanLidLexer, DylanConsoleLexer
from pygments.lexers.ooc import OocLexer
from pygments.lexers.felix import FelixLexer
from pygments.lexers.nimrod import NimrodLexer
from pygments.lexers.crystal import CrystalLexer
__all__ = []
|
wandb/client
|
wandb/vendor/pygments/lexers/compiled.py
|
Python
|
mit
| 1,385
|
[
"CRYSTAL"
] |
f5dfd70d5ef9162fc49ebb153c07b17ecf0b440cac7e2c6964336a5448631945
|
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2015 Digi International Inc., All Rights Reserved.
#
from hamcrest import assert_that, ends_with, is_, contains_string
from nose import with_setup
import splinter_tests
from utils import (delete_all_dashboards, log_in_clean, add_empty_dashboard,
screenshot_on_exception, do_sleep)
browser = None
def setup():
splinter_tests.start_browser()
global browser
browser = splinter_tests.browser
def teardown():
splinter_tests.kill_browser()
def log_in(*auth):
delete_all_dashboards(*auth)
add_empty_dashboard(*auth)
log_in_clean(*auth)
splinter_tests.visit("/#/add_widget")
# Wait a moment for everything to settle
do_sleep()
with screenshot_on_exception("log_in_not_add_widget"):
assert_that(browser.url, ends_with('/#/add_widget'))
log_in_before = with_setup(lambda: log_in("e2e_user", "e2e_password",
"e2e_fqdn"))
log_in_test_before = with_setup(lambda: log_in("test_user", "e2e_password",
"login.etherios.com"))
def is_error_message_present(key, message):
xpath = ("//div[contains(@class, 'alert alert-danger')]/ul/li"
"/strong[contains(., '{}')]/..").format(key)
match = browser.find_by_xpath(xpath)
# Check that we found a matching error message
with screenshot_on_exception("find_error_%s" % key):
assert not match.is_empty(), "No error message for %s" % key
assert_that(match.first.text, contains_string(message))
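# The xpath above matches validation markup like (hypothetical):
#   <div class="alert alert-danger"><ul>
#     <li><strong>Label</strong> This field is required.</li>
#   </ul></div>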
@log_in_before
def test_initial_errors():
# We should start out with 4 errors, related to Widget Type, Label,
# Gateway, and XBee Module
errors = [
("Type", "Invalid widget type selection."),
("Label", "This field is required."),
("Gateway", "You need to select a gateway."),
("XBee Module", "You need to select an XBee module.")
]
    for key, message in errors:
        # bind the loop variables via default arguments so each generated test
        # checks its own key/message rather than the loop's final values
        fn = lambda key=key, message=message: is_error_message_present(key, message)
        fn.description = "Error message '{}: {}' present?".format(key, message)
        yield fn
# Select serial data widget - now the Type error goes away?
options = browser.find_by_css("select#type option")
assert not options.is_empty()
def check_gone():
# Select Serial Data Widget
splinter_tests.select_by_text("type", "Serial Data Widget")
with screenshot_on_exception("type_error_still_present"):
assert not browser.is_text_present(
"Invalid widget type selection.")
check_gone.description = ("Error message about widget type is gone after "
"picking serial widget?")
yield check_gone
    for key, message in ((k, m) for k, m in errors if k != "Type"):
        # same default-argument binding as above to avoid the late-binding bug
        fn = lambda key=key, message=message: is_error_message_present(key, message)
        fn.description = "Error message '{}: {}' still present?".format(key, message)
        yield fn
@log_in_test_before
def test_check_configuration():
'''The 'Check Radio Configuration' button should appear when expected.'''
# Locate the button by xpath, and not using its name, because we only
# expect to see the one button, and finding it by its text is more flexible
# (and closer to what the user would do).
button_xpath = '//button[contains(., "Check Radio Configuration")]'
def button_missing(missing=True):
with screenshot_on_exception("button_missing_assert_%s" % missing):
buttons = browser.find_by_xpath(button_xpath)
assert_that(buttons.is_empty(), is_(missing))
# The button should not be present.
button_missing()
# Get the list of widget types, and their associated value in the select
# tag.
options = browser.find_by_css("select#type option")
assert not options.is_empty()
optmap = {}
for opt in options:
optmap[opt.text] = opt.value
# Select switch widget
browser.select("type", optmap['On/Off Switch Widget'])
# The button should not be present.
button_missing()
# Select the first gateway in the list
browser.select("device", '0')
# The button should not be present.
button_missing()
# Wait for the discovery to complete. For the e2e server this should only
# take a second or two
do_sleep(multiplier=2)
# Pick the first XBee in the list
browser.select('xbeenode', '0')
# The button should not be present.
button_missing()
# Pick the first stream option (DIO0)
xpath = '//select[@id="sets"]//option[.="DIO0"]'
# Copied from splinter browser.select implementation
browser.find_by_xpath(xpath).first._element.click()
# The button should be present now.
button_missing(False)
# Execute a basic test of the configuration modal.
# Check that it can be brought up using the 'Check Radio Configuration' button,
# and that it disappears if the user clicks the 'Cancel' button.
@log_in_test_before
def test_configuration_modal_basic():
'''The configuration modal should appear when the button is clicked.'''
with screenshot_on_exception("config_modal_basic"):
# Get the list of widget types, and their associated value in the
# select tag.
options = browser.find_by_css("select#type option")
assert not options.is_empty()
optmap = {}
for opt in options:
optmap[opt.text] = opt.value
# Select switch widget
browser.select("type", optmap['On/Off Switch Widget'])
# Select the first gateway in the list
browser.select("device", '0')
# Wait for the discovery to complete. For the e2e server this should
# only take a second or two
do_sleep()
# Pick the first XBee in the list
browser.select('xbeenode', '0')
# Pick the first stream option (DIO0)
xpath = '//select[@id="sets"]//option[.="DIO0"]'
# Copied from splinter browser.select implementation
browser.find_by_xpath(xpath).first._element.click()
# The button should be present now. But first, make sure the modal is
# not present currently, to be safe.
assert not browser.is_text_present("Device IO Configuration")
button = browser.find_by_name("checkRadioConfig")
assert not button.is_empty()
button.first.click()
# Check the modal appears
assert browser.is_text_present("Device IO Configuration", wait_time=1)
# Check that there is a close button
xpath = '//div[@class="modal-footer"]/button[.="Close"]'
btn = browser.find_by_xpath(xpath)
assert not btn.is_empty()
# Click the close button - does the modal go away?
btn.first.click()
# Wait a moment
do_sleep()
assert not browser.is_text_present("Device IO Configuration")
assert browser.find_by_xpath(xpath).is_empty()
|
brucetsao/XBeeZigBeeCloudKit
|
splinter_tests/test_add_widget_page.py
|
Python
|
mpl-2.0
| 7,121
|
[
"VisIt"
] |
78cf56d981d33f81a56c8bf6326c9dc808d1ba88608b28355558e589fb34355c
|
import moogli
import moose
from moose import neuroml
from PyQt4 import Qt, QtCore, QtGui
import sys
import os
app = QtGui.QApplication(sys.argv)
filename = os.path.join( os.path.split(os.path.realpath(__file__))[0]
, "../neuroml/PurkinjeCellPassivePulseInput/PurkinjePassive.net.xml"
)
moose.neuroml.loadNeuroML_L123(filename)
morphology_distal = moogli.read_morphology_from_moose(name = "", path = "/cells[0]", radius = moogli.DISTAL)
viewer_distal = moogli.MorphologyViewerWidget(morphology_distal)
viewer_distal.setWindowTitle("Distal Radius")
morphology_averaged = moogli.read_morphology_from_moose(name = "", path = "/cells[0]", radius = moogli.AVERAGED)
viewer_averaged = moogli.MorphologyViewerWidget(morphology_averaged)
viewer_averaged.setWindowTitle("Averaged Radius")
morphology_proximal_distal = moogli.read_morphology_from_moose(name = "", path = "/cells[0]", radius = moogli.PROXIMAL_DISTAL)
viewer_proximal_distal = moogli.MorphologyViewerWidget(morphology_proximal_distal)
viewer_proximal_distal.setWindowTitle("Continuous Variation in Radius")
viewer_distal.show()
viewer_averaged.show()
viewer_proximal_distal.show()
app.exec_()
|
dilawar/moose-full
|
moose-examples/moogli/simple_viewing.py
|
Python
|
gpl-2.0
| 1,196
|
[
"MOOSE"
] |
e2172471b41b6647ad377a0075c5cc1b74391910f9dfafe32ab9ea18a643bbc0
|
"""
climatology.py
Compute a multi-epoch (multi-day) climatology from daily SST Level-3 grids.
Simple code to be run on Spark cluster, or using multi-core parallelism on single machine.
"""
import sys, os, calendar, urlparse, urllib
from datetime import datetime
import numpy as N
from variables import getVariables, close
#from timePartitions import partitionFilesByKey
from split import fixedSplit
#from stats import Stats
from pathos.multiprocessing import ProcessingPool as Pool
from plotlib import imageMap, makeMovie
#from gaussInterp import gaussInterp
VERBOSE = 1
# Possible execution modes
# Multicore & cluster modes use pathos pool.map(); Spark mode uses PySpark cluster.
ExecutionModes = ['sequential', 'multicore', 'cluster', 'spark']
# SST L3m 4.6km Metadata
# SST values are scaled integers in degrees Celsius, lat/lon is 4320 x 8640
# Variable = 'sst', Mask = 'qual_sst', Coordinates = ['lat', 'lon']
# Generate algorithmic name for N-day Climatology product
SSTClimatologyTemplate = 'SST.L3.Global.Clim.%(period)s.%(date)s.%(version)s.nc' #??
# Simple mask and average functions to get us started, then add gaussian interpolation.
# MODIS L3 SST product: qual_sst ranges over [-1, 2]; =0 is the best data; adding =1 gives better coverage
def qcMask(var, mask): return N.ma.array(var, mask=N.ma.make_mask(mask))
#def qcMask(var, mask): return N.ma.masked_where(mask != 0, var)
def average(a): return N.ma.mean(a, axis=0)
#AveragingFunctions = {'pixelAverage': average, 'gaussInterp': gaussInterp}
AveragingFunctions = {'pixelAverage': average, 'gaussInterp': average}
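# Tiny sketch of the mask-and-average pipeline (hypothetical 2x2 grids, using
# only constructs defined above):
#   sst = N.array([[20., 21.], [22., 23.]])
#   qual = N.array([[0, 1], [0, 0]])        # nonzero quality flag -> masked out
#   a = qcMask(sst, qual)                   # masks the (0, 1) pixel
#   accum = N.ma.empty((2, 2, 2)); accum[0] = a; accum[1] = a + 1.0
#   clim = average(accum)                   # per-pixel mean over the 2 epochs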
def climByAveragingPeriods(urls, # list of (daily) granule URLs for a long time period (e.g. a year)
nEpochs, # compute a climatology for every N epochs (days) by 'averaging'
nWindow, # number of epochs in window needed for averaging
variable, # name of primary variable in file
mask, # name of mask variable
coordinates, # names of coordinate arrays to read and pass on (e.g. 'lat' and 'lon')
maskFn=qcMask, # mask function to compute mask from mask variable
averager='pixelAverage', # averaging function to use, one of ['pixelAverage', 'gaussInterp']
mode='sequential', # Map across time periods of N-days for concurrent work, executed by:
# 'sequential' map, 'multicore' using pool.map(), 'cluster' using pathos pool.map(),
# or 'spark' using PySpark
numNodes=1, # number of cluster nodes to use
nWorkers=4, # number of parallel workers per node
averagingFunctions=AveragingFunctions, # dict of possible averaging functions
                           legalModes=ExecutionModes               # list of possible execution modes
):
'''Compute a climatology every N days by applying a mask and averaging function.
Writes the averaged variable grid, attributes of the primary variable, and the coordinate arrays in a dictionary.
***Assumption: This routine assumes that the N grids will fit in memory.***
'''
    try:
        averageFn = averagingFunctions[averager]
    except KeyError:
        averageFn = average
        print >>sys.stderr, 'climatology: Error, averaging function must be one of %s; falling back to pixelAverage.' % str(averagingFunctions.keys())
urlSplits = [s for s in fixedSplit(urls, nEpochs)]
if VERBOSE: print >>sys.stderr, urlSplits
def climsContoured(urls):
n = len(urls)
var = climByAveraging(urls, variable, mask, coordinates, maskFn, averageFn)
return contourMap(var, variable, coordinates, n, urls[0])
if mode == 'sequential':
plots = map(climsContoured, urlSplits)
elif mode == 'multicore':
pool = Pool(nWorkers)
plots = pool.map(climsContoured, urlSplits)
    elif mode == 'cluster':
        pass
    elif mode == 'spark':
        pass
    if mode in ('cluster', 'spark'):
        # the cluster and spark paths are not implemented yet, so fall back to
        # a sequential map rather than leaving plots undefined
        plots = map(climsContoured, urlSplits)
print plots
return plots
# return makeMovie(plots, 'clim.mpg')
def climByAveraging(urls, # list of granule URLs for a time period
variable, # name of primary variable in file
mask, # name of mask variable
coordinates, # names of coordinate arrays to read and pass on (e.g. 'lat' and 'lon')
maskFn=qcMask, # mask function to compute mask from mask variable
averageFn=average # averaging function to use
):
'''Compute a climatology over N arrays by applying a mask and averaging function.
Returns the averaged variable grid, attributes of the primary variable, and the coordinate arrays in a dictionary.
***Assumption: This routine assumes that the N grids will fit in memory.***
'''
n = len(urls)
varList = [variable, mask]
for i, url in enumerate(urls):
fn = retrieveFile(url, '~/cache')
if VERBOSE: print >>sys.stderr, 'Read variables and mask ...'
var, fh = getVariables(fn, varList) # return dict of variable objects by name
if i == 0:
dtype = var[variable].dtype
shape = (n,) + var[variable].shape
accum = N.ma.empty(shape, dtype)
v = maskFn(var[variable], var[mask]) # apply quality mask variable to get numpy MA
# v = var[variable][:]
accum[i] = v # accumulate N arrays for 'averaging'
if i+1 != len(urls): # keep var dictionary from last file to grab metadata
close(fh) # REMEMBER: closing fh loses in-memory data structures
if VERBOSE: print >>sys.stderr, 'Averaging ...'
coord, fh = getVariables(fn, coordinates) # read coordinate arrays and add to dict
for c in coordinates: var[c] = coord[c][:]
    if averageFn == average:
        avg = averageFn(accum)               # call averaging function
    else:
        var[variable] = accum
        if averageFn.__name__ == 'gaussInterp':   # the gaussInterp import is disabled above
            varNames = [variable] + coordinates
            # latGrid, lonGrid, wlat/wlon, slat/slon etc. would come from the gaussInterp configuration
            avg, vweight, status = \
                gaussInterp(var, varNames, latGrid, lonGrid, wlat, wlon, slat, slon, stime, vfactor, missingValue)
var['attributes'] = var[variable].__dict__ # save attributes of primary variable
var[variable] = avg # return primary variable & mask arrays in dict
var[mask] = N.ma.getmask(avg)
# close(fh) # Can't close, lose netCDF4.Variable objects, leaking two fh
return var
def contourMap(var, variable, coordinates, n, url):
p = urlparse.urlparse(url)
    filename = os.path.split(p.path)[1]
    outFile = filename + '.png'
    # NOTE: matplotlib is TOO slow on large arrays, so the variable array (SST)
    # may need to be downscaled before contouring
    vals = var[variable][:]
# Fixed color scale, write file, turn off auto borders, set title, reverse lat direction so monotonically increasing??
imageMap(var[coordinates[1]][:], var[coordinates[0]][:], var[variable][:],
vmin=-2., vmax=45., outFile=outFile, autoBorders=False,
title='%s %d-day Mean from %s' % (variable.upper(), n, filename))
print >>sys.stderr, 'Writing contour plot to %s' % outFile
return outFile
def isLocalFile(url):
'''Check if URL is a local path.'''
u = urlparse.urlparse(url)
if u.scheme == '' or u.scheme == 'file':
        if not os.path.exists(u.path):
print >>sys.stderr, 'isLocalFile: File at local path does not exist: %s' % u.path
return (True, u.path)
else:
return (False, u.path)
def retrieveFile(url, dir=None):
'''Retrieve a file from a URL, or if it is a local path then verify it exists.'''
if dir is None: dir = './'
ok, path = isLocalFile(url)
fn = os.path.split(path)[1]
outPath = os.path.join(dir, fn)
if not ok:
if os.path.exists(outPath):
print >>sys.stderr, 'retrieveFile: Using cached file: %s' % outPath
else:
try:
print >>sys.stderr, 'retrieveFile: Retrieving (URL) %s to %s' % (url, outPath)
urllib.urlretrieve(url, outPath)
except:
print >>sys.stderr, 'retrieveFile: Cannot retrieve file at URL: %s' % url
return None
return outPath
def dailyFile2date(path, offset=1):
    '''Convert YYYYDOY string in filename to date.'''
    fn = os.path.split(path)[1]
    year = int(fn[offset:offset+4])    # parsed as a sanity check; currently unused
    doy = int(fn[offset+5:offset+8])   # parsed as a sanity check; currently unused
    return fn[5:15].replace('.', '/')
def formatRegion(r):
"""Format lat/lon region specifier as string suitable for file name."""
if isinstance(r, str):
return r
else:
strs = [str(i).replace('-', 'm') for i in r]
return 'region-%s-%sby%s-%s' % tuple(strs)
def formatGrid(r):
"""Format lat/lon grid resolution specifier as string suitable for file name."""
if isinstance(r, str):
return r
else:
return str(r[0]) + 'by' + str(r[1])
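# Examples (hypothetical specifiers):
#   formatRegion([-10, 10, -5, 5])  -> 'region-m10-10bym5-5'
#   formatGrid((0.5, 0.5))          -> '0.5by0.5'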
def main(args):
nEpochs = int(args[0])
nWindow = int(args[1])
averager = args[2]
mode = args[3]
nWorkers = int(args[4])
urlFile = args[5]
urls = [s.strip() for s in open(urlFile, 'r')]
return climByAveragingPeriods(urls, nEpochs, nWindow, 'sst', 'qual_sst', ['lat', 'lon'],
averager=averager, mode=mode, nWorkers=nWorkers)
if __name__ == '__main__':
print main(sys.argv[1:])
# python climatology.py 5 5 pixelAverage sequential 1 urls_sst_10days.txt
# python climatology.py 5 5 gaussInterp multicore 8 urls_sst_40days.txt
|
dataplumber/nexus
|
climatology/clim/climatology1.py
|
Python
|
apache-2.0
| 10,033
|
[
"Gaussian"
] |
4d27966852158a5b1d13b15304b006c6a676ab190f22b276979c6e57dd6889df
|
# -*- coding: utf-8 -*-
#
# Copyright (c), 2015-2016, Quantum Espresso Foundation and SISSA (Scuola
# Internazionale Superiore di Studi Avanzati). All rights reserved.
# This file is distributed under the terms of the MIT License. See the
# file 'LICENSE' in the root directory of the present distribution, or
# http://opensource.org/licenses/MIT.
# Authors: Davide Brunato
#
import logging
# QEspresso imports
from .documents import QeDocument, PwDocument, PhononDocument, NebDocument
from .converters import RawInputConverter, PwInputConverter, PhononInputConverter, NebInputConverter
from .exceptions import ConfigError
from .xsdtypes import XSD_BUILTIN_TYPES, XMLSchema
from .utils.logger import set_logger
logger = logging.getLogger('qespresso')
set_logger(1)
__all__ = [
    'set_logger', 'ConfigError',
    'QeDocument', 'PwDocument', 'PhononDocument', 'NebDocument',
    'RawInputConverter', 'PwInputConverter', 'PhononInputConverter',
    'NebInputConverter'
]
|
afonari/q-e_schrodinger
|
bin/qexsd/qespresso/__init__.py
|
Python
|
gpl-2.0
| 938
|
[
"Quantum ESPRESSO"
] |
f11348f7497e6e21202ddfc3f4a69907509be48de5dd7f551ddbac539aec6c6d
|
import tkSimpleDialog
import tkMessageBox
#import p3d.protein
#import p3d.geo
from pymol.wizard import Wizard
from pymol import cmd, util
from pymol.controlling import mode_dict
class Bond(object):
def __init__(self,bond1,bond2,resid1,resid2):
if bond2 > bond1:
self.bond1=bond1
self.bond2=bond2
self.resid1=resid1
self.resid2=resid2
else:
self.bond1=bond2
self.bond2=bond1
self.resid1=resid2
self.resid2=resid1
self.indexes=[self.bond1,self.bond2]
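# Example (hypothetical atom indexes): Bond(12, 7, 3, 3) normalizes the order,
# storing bond1=7, bond2=12, resid1=3, resid2=3, and indexes == [7, 12].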
class selector_prot(Wizard):
def __init__(self,name,chain,resid,resid2,_self=cmd):
Wizard.__init__(self,_self)
self.resid = resid
self.resid2 = resid2
self.name = name
self.chain = chain
self.extend = 1
self.bonds=[]
self.resids=[]
self.indexes=[]
        self.load=None
        self.lead=0
        self.pk1_st=None
        self.pk2_st=None
def get_panel(self):
label = 'No Mutation'
return [
[ 1, 'Select Rotatable Bonds',''],
[ 1, 'for Residue '+ self.resid ,''],
[ 2, 'Pick Bond' , 'cmd.get_wizard().apply()'],
[ 2, 'Rotate View' , 'cmd.get_wizard().rotate()'],
[ 2, 'Show More Bonds' , 'cmd.get_wizard().show()'],
[ 2, 'Pick Rotatable Section' , 'cmd.get_wizard().srot()'],
[ 2, 'Write Bonds' , 'cmd.get_wizard().set_bonds()'],
[ 2, 'Reset Selected Bonds' , 'cmd.get_wizard().reset()'],
[ 2, 'Finished' , 'cmd.get_wizard().clear()'],
]
def srot(self):
cmd.deselect()
#self.pk2_st=None
self.load=1
self.get_prompt()
print "Testing", self.lead
cmd.config_mouse('three_button_editing')
def show(self):
left = str(int(self.resid)-self.extend)
right = str(int(self.resid)+self.extend)
cmd.show('lines','resid '+left+':'+right)
cmd.zoom('resid '+left+':'+right)
self.extend = self.extend+1
#def isbonded(self,bond0,bond1,stems):
# nextres = 0
# for stem in stems:
# if bond0==stem:
# nextres=bond1
# if bond1==stem:
# nextres=bond0
# return nextres
def get_bonds(self,stems,allbonds,rot_bonds=[]):
nextbonds = []
for stem in stems:
print "STEM", stem
for bond in allbonds:
#print bond.index
if stem in bond.index: #save next bond
print bond.index,"matched bond"
for n in bond.index:
if n != stem: #find next atom
if n not in rot_bonds: #if atom is new:
nextbonds.append(n)
#return indexes connected to stem
return nextbonds
def is_in_bonds(self,stem,bonds):
yes = 0
for bond in bonds:
if stem in bond.indexes:
yes = 1
return yes
def is_in_multiple_bonds(self,stem,bonds):
count = 0
for bond in bonds:
if stem in bond.indexes:
count = count + 1
if count == 2:
return True
else:
return False
#def reset_bond(self,known,bonds): #reset bond, if repeated index save repeat
# ret = []
# print "reset_bond"
# print known, "known"
# for rbon in bonds: #for each rot bond
# if known[0] in rbon.indexes:
# if known[1] not in rbon.indexes:
# ret = [known[1]]
# if known[1] in rbon.indexes:
# if known[0] not in rbon.indexes:
# ret = [known[0]]
# return ret
def set_bonds(self):
startingbond=[]
rangev = []
if self.lead==0:
print "Must select rotatable section first"
elif len(self.bonds)==0:
print "Must select at least one rotatable bonds"
else:
mres = min(self.resids)
xres = max(self.resids)
            # cmd.get_model returns a chempy model whose .bond list holds the bonds
            model = cmd.get_model('resid '+str(self.resid)+':'+str(self.resid2))
            allbonds = model.bond
'''
Removed efficiency code to test end residue labeling - will be slow
if mres != xres: #multires case
mind = min(self.indexes)
xind = max(self.indexes)
irange = [mind,xind] #range of indexes we care about for bonding pattern
if self.lead < mind:
irange = [self.lead,xind]
if self.lead > xind:
irange = [mind,self.lead]
limitedset = []
we want to limit allbonds to a limited index range
for efficiency-may be problem if indexes are really screwed up
for b in allbonds:
if b.index[0] in range(irange[0],irange[1]) or \
b.index[1] in range(irange[0],irange[1]):
limitedset.append(b)
allbonds = limitedset
'''
#Remove dummy atom-for bonding only, will still be rotated
dummy = 'ZZ'
reduced = []
for b in allbonds:
d = False
if self.get_atom(b.index[0])[2] == dummy or self.get_atom(b.index[1])[2] == dummy:
d = True
if d == False:
reduced.append(b)
#print self.get_atom(b.index[0]),self.get_atom(b.index[1])
#print "DONE"
allbonds = reduced
#start from rotatable selection point and find what atoms are always rotatable
rot_bonds = [self.lead]
print rot_bonds,"LEAD"
#print self.bonds
#for b in allbonds:
# print b.index
stems = self.get_bonds(rot_bonds,allbonds,rot_bonds)
nextstep=[]
while len(stems) != 0: #while a bond remains
next_stem = set() #Internal
for s in stems: #check if at rotation
if self.is_in_bonds(s,self.bonds):
if len(nextstep) == 0:
print s, "NEXTSTEP"
nextstep.append(s) #don't move beyond rotation
rot_bonds.append(s)
next_stem.add(s)
#No else - We discard any other rotatable bonds - deal with later
else:
print s, "ROT BOND"
rot_bonds.append(s)
next_stem.add(s)
stems = self.get_bonds(next_stem,allbonds,rot_bonds)
outstring = "!Rotation of dye\n"
lenv = len(self.bonds)
outstring = outstring + '!NROT '+str(lenv)+'\n'
outstring = outstring + 'cons fix sele dbackbone .or. .not. '+\
'(resid @res .and. segid @chain) end\n\n'
#now we look along rest of chain
botbonds = []
count = 0
excluded = rot_bonds #We don't want to select rotatable bonds
stems = self.get_bonds(nextstep,allbonds,excluded)
bond=nextstep #This is a rotatable object
while len(stems) != 0:
excluded=excluded+stems#don't go to a stem two times
for stem in stems:
if self.is_in_bonds(stem,self.bonds): #only care about bonds
if len(bond)==0: #we have a new end of a bond
bond.append(stem)
elif stem != bond[0]:#We have second half of new bond
new_bond = stem
bond.append(new_bond)
count = count + 1
#We need to tease out other rotatable atoms from those in stems
for stem in stems:
if self.is_in_bonds(stem,self.bonds) == False:
#Just looking at other stems-none of these
# have rotatable elements
botbonds = botbonds+[stem]
nexts = list(set(self.get_bonds([stem],allbonds,excluded)))
while len(nexts) != 0:
botbonds = botbonds+nexts
excluded = excluded+nexts #don't go to stem two times
nexts = list(set(self.get_bonds(nexts,allbonds,excluded)))
#Now write output for rotation
outstring = outstring + 'label loop'+str(count)+'\n'
outstring = outstring + self.rotate_axis(bond[0],bond[1])
outstring = outstring + self.rotate_sel(120,botbonds)
outstring = outstring + 'incr '+str(count)+' by '+str(count)+'\n'
outstring = outstring + 'goto mini \n \n'
#We check if the new_bond atom is shared
#The old atom is discarded because we don't go backwards
if self.is_in_multiple_bonds(new_bond,self.bonds):
bond = [new_bond]
else:
bond = []
botbonds=botbonds+stems
stems = list(set(self.get_bonds(stems,allbonds,excluded)))
outfile = open('../../inputs/'+self.name+'_rot.str','w')
outfile.write(outstring)
#write .str file
stream = '!The atoms that are the end of the dye\n'
stream = stream + "define dyefix sele .NOT. ( "
for bindex in botbonds:
atom = self.get_atom(bindex)
stream = stream + " chain "+atom[0]+" .and. resi " + atom[1]+" .and. name "+atom[2]+ " .OR. "
stream = stream + ' ) end\n'
outfile = open('../../inputs/'+self.name+'.str','w')
outfile.write(stream)
print "All files written for ",self.name
def get_atom(self,index):
cmd.select("_p","index "+str(index+1))#convert from internal back to
#label numbering
cmd.iterate("_p","setattr(cmd.get_wizard(),'pk_at',""name)")
cmd.iterate("_p","setattr(cmd.get_wizard(),'pk_ac',""chain)")
cmd.iterate("_p","setattr(cmd.get_wizard(),'pk_ar',""resi)")
return [str(self.pk_ac),str(self.pk_ar),str(self.pk_at)]
def rotate_axis(self,index1,index2):#print axis output
atom1=self.get_atom(index1)
atom2=self.get_atom(index2)
return "coor axis sele atom "+atom1[0]+' '+atom1[1]+' '+atom1[2]+\
" end sele atom "+atom2[0]+' '+atom2[1]+' '+atom2[2]+" end \n"
def rotate_sel(self,angle,flexbonds):#print selection output
outstring = 'coor rota axis PHI '+str(angle)+' sele dyefix '
atoms = []
print "rotate_sel", flexbonds
for index in flexbonds:
cmd.select("_p","index "+str(index+1))#convert from internal back
#to label numbering
cmd.iterate("_p","setattr(cmd.get_wizard(),'pk_at',""name)")
cmd.iterate("_p","setattr(cmd.get_wizard(),'pk_ac',""chain)")
cmd.iterate("_p","setattr(cmd.get_wizard(),'pk_ar',""resi)")
atoms.append([str(self.pk_at),str(self.pk_ac),str(self.pk_ar)])
for atom in atoms: #set(atoms): #ensure every atom is only included once
outstring = outstring + ' .or. '
outstring = outstring+'atom '+atom[1]+' '+atom[2]+' '+atom[0]
return outstring+' end \n'
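    # Sketch of the CHARMM-style line emitted above (hypothetical atoms):
    #   rotate_sel(120, [41, 42])
    #   # -> 'coor rota axis PHI 120 sele dyefix  .or. atom A 12 CA'
    #   #    ' .or. atom A 12 CB end \n'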
def do_select(self,selection):
cmd.deselect()
def rotate(self):
mode_dict['three_button_viewing'] = [ ('l','none','rota')]
cmd.config_mouse('three_button_viewing')
def reset(self):
#cmd.color("atomic")
#cmd.set_bond("line_color","atomic","all")
#util.cbag("all")
self.bonds=[]
cmd.set_bond("line_color","green","all")
def apply(self):
mode_dict['three_button_viewing'] = [ ('l','none','PkTB')]
cmd.config_mouse('three_button_viewing')
print "Apply"
def clear(self):
cmd.quit()
def get_prompt(self):
        if self.load is not None:
            return ["Please pick the atom in the direction of the section you want to rotate"]
        if self.pk2_st is not None:
            return ["You picked the bond between %s and %s"%(self.pk1_st, self.pk2_st)]
        else:
            return ["Please pick an atom or a bond..."]
def do_pick(self,picked_bond):
cmd.iterate("pk1","setattr(cmd.get_wizard(),'pk1_st',""'%s/%s/%s/%s/%s/%s'%(model,segi,chain,resi,name,index))")
print "Picking Loop"
if picked_bond:
cmd.iterate("pk2","setattr(cmd.get_wizard(),'pk2_st',""'%s/%s/%s/%s/%s/%s'%(model,segi,chain,resi,name,index))")
cmd.set_bond("line_color","orange","pk1","pk2")
print [self.pk1_st,self.pk2_st],'bond'
self.resids.append(int(self.pk1_st.split('/')[3])-1)
self.resids.append(int(self.pk2_st.split('/')[3])-1)
self.indexes.append(int(self.pk1_st.split('/')[5])-1)
self.indexes.append(int(self.pk2_st.split('/')[5])-1)
self.bonds.append(Bond(int(self.pk1_st.split('/')[5])-1,int(self.pk2_st.split('/')[5])-1,int(self.pk1_st.split('/')[3])-1,int(self.pk2_st.split('/')[3])-1))
# -1 converts to 0 start index, which is used for bonds - This will be one off from labels in pymol
cmd.unpick()
else:
# for single atom, also get 3D coordinates (EXAMPLE)
print "Single Atom"
self.load=None
cmd.iterate("pk1","setattr(cmd.get_wizard(),'pk1_r',""index)")
self.lead=self.pk1_r-1 #Converting to 0 start index, which is used for bonds
#This will be one off from labels in pymol
cmd.iterate_state(cmd.get_state(),"pk1","setattr(cmd.get_wizard(),'pk1_xyz',(x,y,z))")
#cmd.unpick()
cmd.refresh_wizard()
|
tmorrell/SamStruct
|
inputs/selector_prot.py
|
Python
|
gpl-2.0
| 14,462
|
[
"PyMOL"
] |
f53a1bdd45a1790ce5b7103c12b7b0c33cc235c08891231a06d67d8d9cd84324
|
#!/usr/bin/env python3
# -*- coding: utf8 -*- #
#
#
# Copyright (C) by p.oseidon@datec.at, 1998 - 2017
#
# This file is part of tau4.
#
# tau4 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tau4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tau4. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
from math import *
import tau4
from tau4.mathe.geometry import Point, Polygon
from tau4.mathe.geometry import Point2D
from tau4.mathe.linalg import T3D, V3D
from tau4.sensors import gps, IRS, IRSDummy, Locator, navi, Sensors2, SensorSpecDataIRS, SensorSpecDataUSS, USS, VirtualRanger
from tau4.sensors.gps import IpAddrSupplier
import time
import unittest
class _TESTCASE__Sensors(unittest.TestCase):
def test__simple( self):
"""
"""
print()
### Create and add some sensors to tau4
#
sensors = Sensors2()
### Visit all sensors
#
for sensor in sensors( id_group=None):
s = """
Sensor id (unique among all groups): %s
Sensor pose rel rack: %s
Measurement position rel sensor: %s
Measurement position rel rack: %s
""" % ( \
sensor.id(),
# Unique among all groups.
sensor.rT(),
                    # Pose of the sensor rel. to a rack {R}.
                sensor.sPm(),
                    # Position of the measurement rel. to the sensor.
                sensor.rPm(),
                    # Position of the measurement rel. to the rack
                    # on which the sensor is mounted.
)
print( s)
### Visit IRS only
#
sensors.add_group( id_group="irs")
sensors.add_sensor( \
id_group="irs",
sensor=IRS( \
id=-1,
specs_io=None,
specs_data=SensorSpecDataIRS( 42),
rT=tau4.mathe.linalg.T3DFromEuler()
)
)
for sensor in sensors( id_group="irs"):
print( """
Sensor Id (unique among all groups): %s
Sensor pose rel rack: %s
Measurement position rel sensor: %s
Measurement position rel rack: %s
""" % ( \
sensor.id(),
# Unique among all groups.
sensor.rT(),
                    # Pose of the sensor rel. to a rack {R}.
                sensor.sPo(),
                    # Raw sensor reading: position of an
                    # obstacle rel. to a rack.
                    # R a n g e r o n l y.
                sensor.rPo(),
                    # Derived sensor value: position of the
                    # obstacle rel. to the rack on which
                    # the sensor is mounted.
                    # R a n g e r o n l y.
)
)
self.assertEqual( sensor.sPo(), sensor.sPm())
self.assertEqual( sensor.rPo(), sensor.rPm())
### Visit USS only
#
sensors.add_group( "uss")
sensors.add_sensor( \
id_group="uss",
sensor=USS( \
id=-1,
specs_io=None,
specs_data=SensorSpecDataUSS(),
rT=tau4.mathe.linalg.T3DFromEuler()
)
)
for sensor in sensors( id_group="uss"):
print( """
Sensor Id (unique among all groups): %s
Sensor pose rel rack: %s
Measurement position rel sensor: %s
Measurement position rel rack: %s
""" % ( \
sensor.id(),
# Unique among all groups.
sensor.rT(),
                    # Pose of the sensor rel. to a rack {R}.
                sensor.sPo(),
                    # Raw sensor reading: position of an
                    # obstacle rel. to a rack.
                    # R a n g e r o n l y.
                sensor.rPo(),
                    # Derived sensor value: position of the
                    # obstacle rel. to the rack on which
                    # the sensor is mounted.
                    # R a n g e r o n l y.
)
)
self.assertEqual( sensor.sPo(), sensor.sPm())
self.assertEqual( sensor.rPo(), sensor.rPm())
### Visit GPS only
#
sensors.add_group( "gps")
rTs = tau4.mathe.linalg.T3DFromEuler()
            # Pose of the sensor rel. to the rack
            # on which it is mounted.
sensor = gps.EmlidReachGPS( id="gps.emlid_reach", ip_addr="10.0.0.62", ip_portnbr=1962, rT=rTs)
sensors.add_sensor( id_group="gps", sensor=sensor)
self.assertAlmostEqual( 1, len( sensors( id_group="gps")))
self.assertIs( sensor, sensors( id_group="gps")[ 0])
            # We only hooked in one GPS sensor, so it must be this one.
wTb = tau4.mathe.linalg.T3D.FromEuler()
for sensor in sensors( id_group="gps"):
print( """
Sensor Id (unique among all groups): %s
Transform of sensor rel rack: %s
Position of sensor rel {W}: %s
Position of sensor rel {B}: %s
Position of rack rel {B}: %s
""" % ( \
sensor.id(),
# Unique among all groups.
sensor.rT(),
                    # Own pose rel. to a rack {R}.
                sensor.wP(),
                    # Raw GPS reading: position of the sensor
                    # rel. to {W}.
                    # G P S o n l y. However, sPm() could be
                    # called here as well; all sensors share
                    # that interface.
                Locator().bases().wB().bP( sensor.wP()),
                    # The GPS does NOT compute a position rel. to {B},
                    # e.g. via a method bP( wTb).
                    # That has to go through the Locator.
                Locator().bases().wB().bP( sensor.wP()) - sensor.rT()._P_(),
                    # The GPS does NOT compute a rack position rel. to {B},
                    # e.g. via a method bPr( wTb).
                    # That has to go through the Locator.
)
)
self.assertEqual( sensor.wP(), sensor.sPm())
self.assertEqual( sensor.rT()._P_(), sensor.rPm())
sensor_gps = sensor
### Visit NavSys only
#
sensors.add_group( "navi")
rTs = tau4.mathe.linalg.T3DFromEuler()
# Pose of the sensor rel. to a rack,
# on which it is mounted.
sensor = navi.NavSys( id="navi.emlid_reach", gps=sensor_gps)
sensors.add_sensor( id_group="navi", sensor=sensor)
self.assertAlmostEqual( 1, len( sensors( id_group="navi")))
self.assertIs( sensor, sensors( id_group="navi")[ 0])
# We added only one sensor to this group, so it must be this one.
wTb = tau4.mathe.linalg.T3D.FromEuler()
for sensor in sensors( id_group="navi"):
print( """
Sensor Id (unique among all groups): %s
Transform of sensor rel rack: %s
Position of sensor rel {W}: %s
Position of sensor rel {B}: %s
Position of rack rel {B}: %s
""" % ( \
sensor.id(),
# Unique among all groups.
sensor.rT(),
# Own pose rel. to a rack {R}.
sensor.wP(),
# Raw GPS reading: position of the sensor
# rel. {W}.
# G P S o n l y. However, sPm() could be
# called here as well; all sensors
# provide that interface.
Locator().bases().wB().bP( sensor.wP()),
# The GPS computes NO position rel. {B},
# e.g. via a method bP( wTb).
# That has to be done via the Locator.
Locator().bases().wB().bP( sensor.wP()) - sensor.rT()._P_(),
# The GPS computes NO rack position rel. {B},
# e.g. via a method bPr( wTb).
# That has to be done via the Locator.
)
)
self.assertEqual( sensor.wP(), sensor.sPm())
self.assertEqual( sensor.rT()._P_(), sensor.rPm())
return
_Testsuite = unittest.makeSuite( _TESTCASE__Sensors)
class _TESTCASE__Rangers(unittest.TestCase):
def test( self):
"""
"""
print()
### Create the container being a Singleton
#
sensors = Sensors2()
### We work with some IRS sitting on a mobile robot.
#
# The y-axis of the robot's frame points to its front; the x-axis
# consequently points to its right-hand side.
#
# The frame sits in the robot's centre.
#
sensors.add_group( id_group="rangers")
sensor = IRSDummy( id="irs.11:00", specs_io=None, specs_data=SensorSpecDataIRS( 42), rT=tau4.mathe.linalg.T3DFromEuler( 0.250, 0.500, 0, radians( +15), 0, 0))
sensors.add_sensor( id_group="rangers", sensor=sensor)
sensor = IRSDummy( id="irs.13:00", specs_io=None, specs_data=SensorSpecDataIRS( 42), rT=tau4.mathe.linalg.T3DFromEuler( -0.250, 0.500, 0, radians( -15), 0, 0))
sensors.add_sensor( id_group="rangers", sensor=sensor)
### Calculate the transform of an obstacle relative to the robot {R}
#
for sensor in sensors( id_group="rangers"):
sensor.execute()
sensorLHS = sensors( id_sensor="irs.11:00"); self.assertIs( sensorLHS, sensors.sensor( "irs.11:00"))
sensorRHS = sensors( id_sensor="irs.13:00"); self.assertIs( sensorRHS, sensors.sensor( "irs.13:00"))
### Mock distances and check alpha of the resultant poses (obstacle absent)
#
sensorLHS._distance_( 0.800)
sensorRHS._distance_( 0.800)
rangers = sensors( id_group="rangers")
P = V3D()
for sensor in rangers:
rPo = sensor.rTo().P()
P += rPo
alpha = atan2( P.y(), P.x())
self.assertAlmostEqual( radians( 90), alpha)
### Mock distances and check alpha of the resultant poses (obstacle present)
#
sensorLHS._distance_( 0.400)
# Obstacle approaching on the lhs
sensorRHS._distance_( 0.800)
rangers = sensors( id_group="rangers")
P = V3D()
for sensor in rangers:
P += sensor.rTo().P()
alpha = atan2( P.y(), P.x())
self.assertAlmostEqual( Sensors2.rAlpha( rangers), alpha)
self.assertTrue( degrees( alpha) < 90)
# The vector points to the right when
# the obstacle approaches from the left.
self.assertAlmostEqual( 87.25481647, degrees( alpha))
return
_Testsuite.addTest( unittest.makeSuite( _TESTCASE__Rangers))
class _TESTCASE__IpAddrSupplier(unittest.TestCase):
def test( self):
"""
"""
print()
ipas = IpAddrSupplier( ip_addrs=("192.168.42.253", "10.0.0.254"), dirname="./")
# These addresses are taken when the testcase
# runs the very first time. All other
# testcase runs take the remembered values
# (see below).
ipas.open()
ip_addrs = ipas.ip_addrs()
self.assertEqual( "192.168.42.253", next( ip_addrs))
self.assertEqual( "10.0.0.254", next( ip_addrs))
self.assertEqual( "192.168.42.254", next( ip_addrs))
self.assertEqual( "192.168.42.255", next( ip_addrs))
self.assertEqual( "192.168.42.1", next( ip_addrs))
while next( ip_addrs) != "192.168.42.252":
pass
ipas.remember( "192.168.42.42")
self.assertEqual( "10.0.0.255", next( ip_addrs))
ipas.remember( "10.0.0.42")
ipas.close()
self.assertTrue( "192.168.42.42" in ipas.remembereds())
self.assertTrue( "10.0.0.42" in ipas.remembereds())
ipas.remember( "192.168.42.253")
# Otherwise this testcase would pass only once!
ipas.remember( "10.0.0.254")
# Otherwise this testcase would pass only once!
return
#_Testsuite.addTest( unittest.makeSuite( _TESTCASE__IpAddrSupplier))
class _TESTCASE__VirtualRanger(unittest.TestCase):
def test( self):
"""
"""
print()
### WorkspaceContour
#
contour = Polygon( (( 0, 0), ( 10, 10), ( 20, 10), ( 20, 0)))
### Sensor
#
rTs = T3D.FromEuler( 0, 0.5, 0)
# The sensor sits at the front on the nose,
# which is 500 mm away from {R}.
vr = VirtualRanger( rT=rTs)
### Move the robot and see what happens
#
bTr = T3D.FromEuler( 10, 5)
# The ray does not intersect the contour
vr.execute( contour, bTr=bTr)
hv = vr.headingvectorR()
self.assertTrue( contour.contains( bTr))
### Move the robot and see what happens
#
bTr = T3D.FromEuler( 10, 9.5)
# The ray does not intersect the contour
vr.execute( contour, bTr=bTr)
hv = vr.headingvectorR()
self.assertTrue( contour.contains( bTr))
### Move the robot and see what happens
#
bTr = T3D.FromEuler( 10, 10.5)
# Robot outside the contour
vr.execute( contour, bTr=bTr)
hv = vr.headingvectorR()
self.assertFalse( contour.contains( bTr))
### Move the robot and see what happens
#
bTr = T3D.FromEuler( 10, -10)
# Robot outside the contour
vr.reach( 100)
# Increase the range
vr.execute( contour, bTr=bTr)
hv = vr.headingvectorR()
self.assertFalse( contour.contains( bTr))
return
_Testsuite.addTest( unittest.makeSuite( _TESTCASE__VirtualRanger))
class _TESTCASE__(unittest.TestCase):
def test( self):
"""
"""
print()
return
_Testsuite.addTest( unittest.makeSuite( _TESTCASE__))
def _lab_():
def tau4s( tau4pc):
print( tau4pc.client().wP())
sensor = gps.EmlidReachGPS( id=-1, ip_addr="192.168.42.0", ip_portnbr=1962, rT=T3D.FromEuler())
sensor.reg_tau4s_on_modified( tau4s)
t = time.time()
while True:
sensor.execute()
print( "State: '%s'. " % sensor._sm_().smstate_current().__class__.__name__)
# Only a GPS can do this.
return
def _lab2_():
def tau4s( tau4pc):
print( tau4pc.client().wP())
sensor = navi.NavSys( id=-1, gps=gps.EmlidReachGPS( id=-1, ip_addr="192.168.42.0", ip_portnbr=1962, rT=T3D.FromEuler()))
sensor.reg_tau4s_on_modified( tau4s)
t = time.time()
while True:
sensor.execute()
print( "The navisys' current state is '%s'. " % sensor.statename())
# Only a GPS can do this.
return
def _Test_():
unittest.TextTestRunner( verbosity=2).run( _Testsuite)
if __name__ == '__main__':
_Test_()
#_lab_()
#_lab2_()
input( u"Press any key to exit...")
|
p-o-seidon/tau4
|
src/tau4/test__tau4sensors.py
|
Python
|
gpl-3.0
| 17,045
|
[
"VisIt"
] |
1c5fd688a33950c341a038d14e81f1a40919d60aff57b2ecd446b9322442b933
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
********************************************************
**espressopp.interaction.DihedralHarmonicCos**
********************************************************
.. math::
U = K (\cos(\phi) - \cos(\phi_0))^2
.. function:: espressopp.interaction.DihedralHarmonicCos(K, phi0)
:param K: (default: 0.0)
:param phi0: (default: 0.0)
:type K: real
:type phi0: real
.. function:: espressopp.interaction.FixedQuadrupleListDihedralHarmonicCos(system, fql, potential)
:param system:
:param fql:
:param potential:
:type system:
:type fql:
:type potential:
.. function:: espressopp.interaction.FixedQuadrupleListDihedralHarmonicCos.getFixedQuadrupleList()
:rtype: A Python list of lists.
.. function:: espressopp.interaction.FixedQuadrupleListDihedralHarmonicCos.setPotential(potential)
:param potential:
:type potential:
"""
from espressopp import pmi
from espressopp.esutil import *
from espressopp.interaction.DihedralPotential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_DihedralHarmonicCos, interaction_FixedQuadrupleListDihedralHarmonicCos
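# Note on the guard repeated in the Local classes below: the C++ backend is
# initialized only when PMI is inactive or when this MPI rank belongs to the
# active PMI CPU group; the controller-side proxy classes are defined further
# down under pmi.isController.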
class DihedralHarmonicCosLocal(DihedralPotentialLocal, interaction_DihedralHarmonicCos):
def __init__(self, K=0.0, phi0=0.0):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_DihedralHarmonicCos, K, phi0)
class FixedQuadrupleListDihedralHarmonicCosLocal(InteractionLocal, interaction_FixedQuadrupleListDihedralHarmonicCos):
def __init__(self, system, fql, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_FixedQuadrupleListDihedralHarmonicCos, system, fql, potential)
def setPotential(self, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, potential)
def getFixedQuadrupleList(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getFixedQuadrupleList(self)
if pmi.isController:
class DihedralHarmonicCos(DihedralPotential):
'The DihedralHarmonicCos potential.'
pmiproxydefs = dict(
cls = 'espressopp.interaction.DihedralHarmonicCosLocal',
pmiproperty = ['K', 'phi0']
)
class FixedQuadrupleListDihedralHarmonicCos(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.FixedQuadrupleListDihedralHarmonicCosLocal',
pmicall = ['setPotential', 'getFixedQuadrupleList']
)
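# A minimal usage sketch, assuming an already set-up `system` and fixed
# quadruple list `fql` (their construction is elided); it relies only on the
# constructor and setter signatures documented in the module docstring above.
#
# potential = espressopp.interaction.DihedralHarmonicCos(K=1.0, phi0=0.0)
# interaction = espressopp.interaction.FixedQuadrupleListDihedralHarmonicCos(
#     system, fql, potential)
# interaction.setPotential(potential)
# quadruples = interaction.getFixedQuadrupleList()  # a Python list of lists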
|
junghans/espressopp
|
src/interaction/DihedralHarmonicCos.py
|
Python
|
gpl-3.0
| 3,583
|
[
"ESPResSo"
] |
872e5714bf07849d299106d84e27c3a0619ea4dae95cf33246271c7f36b5f800
|
import logging
import time
import traceback
import numpy as np
from ..conventions import cf_encoder
from ..core import indexing
from ..core.pycompat import dask_array_type
from ..core.utils import FrozenDict, NdimSizeLenMixin
# Create a logger object, but don't add any handlers. Leave that to user code.
logger = logging.getLogger(__name__)
NONE_VAR_NAME = "__values__"
def _encode_variable_name(name):
if name is None:
name = NONE_VAR_NAME
return name
def _decode_variable_name(name):
if name == NONE_VAR_NAME:
name = None
return name
def find_root_and_group(ds):
"""Find the root and group name of a netCDF4/h5netcdf dataset."""
hierarchy = ()
while ds.parent is not None:
hierarchy = (ds.name.split("/")[-1],) + hierarchy
ds = ds.parent
group = "/" + "/".join(hierarchy)
return ds, group
def robust_getitem(array, key, catch=Exception, max_retries=6, initial_delay=500):
"""
Robustly index an array, using retry logic with exponential backoff if any
of the errors ``catch`` are raised. The initial_delay is measured in ms.
With the default settings, the maximum delay will be in the range of 32-64
seconds.
"""
assert max_retries >= 0
for n in range(max_retries + 1):
try:
return array[key]
except catch:
if n == max_retries:
raise
base_delay = initial_delay * 2 ** n
next_delay = base_delay + np.random.randint(base_delay)
msg = (
"getitem failed, waiting %s ms before trying again "
"(%s tries remaining). Full traceback: %s"
% (next_delay, max_retries - n, traceback.format_exc())
)
logger.debug(msg)
time.sleep(1e-3 * next_delay)
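# A minimal sketch of how robust_getitem is meant to wrap flaky remote reads;
# FlakyArray is a hypothetical stand-in for a store whose __getitem__ raises
# transient errors before succeeding.
#
# class FlakyArray:
#     def __init__(self, data, failures=2):
#         self.data, self.failures = data, failures
#     def __getitem__(self, key):
#         if self.failures > 0:
#             self.failures -= 1
#             raise OSError("transient failure")
#         return self.data[key]
#
# arr = FlakyArray(np.arange(10))
# robust_getitem(arr, slice(0, 3), catch=OSError, max_retries=5)  # -> array([0, 1, 2])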
class BackendArray(NdimSizeLenMixin, indexing.ExplicitlyIndexed):
__slots__ = ()
def __array__(self, dtype=None):
key = indexing.BasicIndexer((slice(None),) * self.ndim)
return np.asarray(self[key], dtype=dtype)
class AbstractDataStore:
__slots__ = ()
def get_dimensions(self): # pragma: no cover
raise NotImplementedError()
def get_attrs(self): # pragma: no cover
raise NotImplementedError()
def get_variables(self): # pragma: no cover
raise NotImplementedError()
def get_encoding(self):
return {}
def load(self):
"""
This loads the variables and attributes simultaneously.
A centralized loading function makes it easier to create
data stores that do automatic encoding/decoding.
For example::
class SuffixAppendingDataStore(AbstractDataStore):
def load(self):
variables, attributes = AbstractDataStore.load(self)
variables = {'%s_suffix' % k: v
for k, v in variables.items()}
attributes = {'%s_suffix' % k: v
for k, v in attributes.items()}
return variables, attributes
This function will be called anytime variables or attributes
are requested, so care should be taken to make sure it's fast.
"""
variables = FrozenDict(
(_decode_variable_name(k), v) for k, v in self.get_variables().items()
)
attributes = FrozenDict(self.get_attrs())
return variables, attributes
def close(self):
pass
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.close()
class ArrayWriter:
__slots__ = ("sources", "targets", "regions", "lock")
def __init__(self, lock=None):
self.sources = []
self.targets = []
self.regions = []
self.lock = lock
def add(self, source, target, region=None):
if isinstance(source, dask_array_type):
self.sources.append(source)
self.targets.append(target)
self.regions.append(region)
else:
if region:
target[region] = source
else:
target[...] = source
def sync(self, compute=True):
if self.sources:
import dask.array as da
# TODO: consider wrapping targets with dask.delayed, if this makes
# for any discernible difference in performance, e.g.,
# targets = [dask.delayed(t) for t in self.targets]
delayed_store = da.store(
self.sources,
self.targets,
lock=self.lock,
compute=compute,
flush=True,
regions=self.regions,
)
self.sources = []
self.targets = []
self.regions = []
return delayed_store
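# A minimal sketch of the ArrayWriter workflow, assuming `src` is a dask array
# and `tgt` a preallocated array-like of matching shape: non-dask sources are
# written eagerly in add(), dask sources are deferred until sync().
#
# w = ArrayWriter()
# w.add(src, tgt)   # queued because src is a dask array; written otherwise
# w.sync()          # da.store(...) flushes all queued writes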
class AbstractWritableDataStore(AbstractDataStore):
__slots__ = ()
def encode(self, variables, attributes):
"""
Encode the variables and attributes in this store
Parameters
----------
variables : dict-like
Dictionary of key/value (variable name / xr.Variable) pairs
attributes : dict-like
Dictionary of key/value (attribute name / attribute) pairs
Returns
-------
variables : dict-like
attributes : dict-like
"""
variables = {k: self.encode_variable(v) for k, v in variables.items()}
attributes = {k: self.encode_attribute(v) for k, v in attributes.items()}
return variables, attributes
def encode_variable(self, v):
"""encode one variable"""
return v
def encode_attribute(self, a):
"""encode one attribute"""
return a
def set_dimension(self, dim, length): # pragma: no cover
raise NotImplementedError()
def set_attribute(self, k, v): # pragma: no cover
raise NotImplementedError()
def set_variable(self, k, v): # pragma: no cover
raise NotImplementedError()
def store_dataset(self, dataset):
"""
in stores, variables are all variables AND coordinates
in xarray.Dataset variables are variables NOT coordinates,
so here we pass the whole dataset in instead of doing
dataset.variables
"""
self.store(dataset, dataset.attrs)
def store(
self,
variables,
attributes,
check_encoding_set=frozenset(),
writer=None,
unlimited_dims=None,
):
"""
Top level method for putting data on this store, this method:
- encodes variables/attributes
- sets dimensions
- sets variables
Parameters
----------
variables : dict-like
Dictionary of key/value (variable name / xr.Variable) pairs
attributes : dict-like
Dictionary of key/value (attribute name / attribute) pairs
check_encoding_set : list-like
List of variables that should be checked for invalid encoding
values
writer : ArrayWriter
unlimited_dims : list-like
List of dimension names that should be treated as unlimited
dimensions.
"""
if writer is None:
writer = ArrayWriter()
variables, attributes = self.encode(variables, attributes)
self.set_attributes(attributes)
self.set_dimensions(variables, unlimited_dims=unlimited_dims)
self.set_variables(
variables, check_encoding_set, writer, unlimited_dims=unlimited_dims
)
def set_attributes(self, attributes):
"""
This provides a centralized method to set the dataset attributes on the
data store.
Parameters
----------
attributes : dict-like
Dictionary of key/value (attribute name / attribute) pairs
"""
for k, v in attributes.items():
self.set_attribute(k, v)
def set_variables(self, variables, check_encoding_set, writer, unlimited_dims=None):
"""
This provides a centralized method to set the variables on the data
store.
Parameters
----------
variables : dict-like
Dictionary of key/value (variable name / xr.Variable) pairs
check_encoding_set : list-like
List of variables that should be checked for invalid encoding
values
writer : ArrayWriter
unlimited_dims : list-like
List of dimension names that should be treated as unlimited
dimensions.
"""
for vn, v in variables.items():
name = _encode_variable_name(vn)
check = vn in check_encoding_set
target, source = self.prepare_variable(
name, v, check, unlimited_dims=unlimited_dims
)
writer.add(source, target)
def set_dimensions(self, variables, unlimited_dims=None):
"""
This provides a centralized method to set the dimensions on the data
store.
Parameters
----------
variables : dict-like
Dictionary of key/value (variable name / xr.Variable) pairs
unlimited_dims : list-like
List of dimension names that should be treated as unlimited
dimensions.
"""
if unlimited_dims is None:
unlimited_dims = set()
existing_dims = self.get_dimensions()
dims = {}
for v in unlimited_dims: # put unlimited_dims first
dims[v] = None
for v in variables.values():
dims.update(dict(zip(v.dims, v.shape)))
for dim, length in dims.items():
if dim in existing_dims and length != existing_dims[dim]:
raise ValueError(
"Unable to update size for existing dimension"
"%r (%d != %d)" % (dim, length, existing_dims[dim])
)
elif dim not in existing_dims:
is_unlimited = dim in unlimited_dims
self.set_dimension(dim, length, is_unlimited)
class WritableCFDataStore(AbstractWritableDataStore):
__slots__ = ()
def encode(self, variables, attributes):
# All NetCDF files get CF encoded by default, without this attempting
# to write times, for example, would fail.
variables, attributes = cf_encoder(variables, attributes)
variables = {k: self.encode_variable(v) for k, v in variables.items()}
attributes = {k: self.encode_attribute(v) for k, v in attributes.items()}
return variables, attributes
|
xray/xray
|
xarray/backends/common.py
|
Python
|
apache-2.0
| 10,715
|
[
"NetCDF"
] |
cd7f6e8a7990fe173d4bd0c8ea48eb9513c99c25cc8db1cf25ad760d1e2eb434
|
# -*- coding: utf-8 -*-
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
The Doer defines instances that are going to decorate a big family of classes in this framework.
Following the idea that one module should be associated with
one class, a class decorated by a Doer should have a NameStr that is
a DoStr and also expose a method with the name <DoStr>[0].lower()+<DoStr>[1:].
All the attributes that control this method's process are <DoingStr><MiddleStr><TypeStr>,
and all the ones reset during the method are <DoneStr><MiddleStr><TypeStr>.
This helps a lot for defining a first level of objects that act like input-output controllers.
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Standards.Classors.Defaultor"
DecorationModule=BaseModule
SYS.setSubModule(globals())
#</DefineAugmentation>
#<ImportSpecificModules>
import collections
import inspect
import six
#</ImportSpecificModules>
#<DefineLocals>
DoingAttributePrefixStr='_'
#DoingDecorationPrefixStr='@'
DoingWrapPrefixStr='do_'
DoingDecorationPrefixStr=""
DoingDecorationTagStr="superDo"
DoingDecorationSuffixStr="_"
#</DefineLocals>
#<DefineFunctions>
def DefaultDoFunction(
_InstanceVariable,
*_LiargVariablesList,
**_KwargVariablesDict
):
return _InstanceVariable
def do(
_InstanceVariable,
*_LiargVariablesList,
**_KwargVariablesDict
):
#Define
DoDecorationMethodStr=_KwargVariablesDict['DoDecorationMethodStr']
DoMethodStr=DoDecorationMethodStr.split(DoingDecorationSuffixStr)[-1] if DoingDecorationSuffixStr in DoDecorationMethodStr else DoDecorationMethodStr
DoStr=DoMethodStr[0].upper()+DoMethodStr[1:]
DoingStr=DoStrToDoingStrOrderedDict[DoStr]
DoClassStr=_KwargVariablesDict['DoClassStr']
DoClass=getattr(SYS,DoClassStr)
DoWrapMethodStr=DoingWrapPrefixStr+DoMethodStr
DoWrapUnboundMethod=getattr(
DoClass,
DoWrapMethodStr
)
del _KwargVariablesDict['DoDecorationMethodStr']
del _KwargVariablesDict['DoClassStr']
#debug
'''
print('Doer l.54 inside of the function DoFunction')
print('InstanceVariable is ',_InstanceVariable)
print('_LiargVariablesList is ',_LiargVariablesList)
print('_KwargVariablesDict is ',_KwargVariablesDict)
print('')
'''
#Definition of the DoKwargTuplesList
DoKwargTuplesList=map(
lambda __KwargTuple:
(
DoingStr+DoingAttributePrefixStr.join(
__KwargTuple[0].split(DoingAttributePrefixStr)[1:]),
__KwargTuple[1]
) if __KwargTuple[0].startswith(DoingAttributePrefixStr)
else __KwargTuple,
_KwargVariablesDict.items()
)
#Check
if len(DoKwargTuplesList)>0:
#group by
[
DoClass.DoTempAttributeItemTuplesList,
DoClass.DoTempNotAttributeItemTupleItemsList
]=SYS.groupby(
lambda __DoKwargTuple:
hasattr(_InstanceVariable,__DoKwargTuple[0]),
DoKwargTuplesList
)
#set in the instance the corresponding kwarged arguments
map(
lambda __DoTempAttributeItemTuple:
#set direct explicit attributes
_InstanceVariable.__setattr__(*__DoTempAttributeItemTuple),
DoClass.DoTempAttributeItemTuplesList
)
#Define
DoneKwargDict=dict(DoClass.DoTempNotAttributeItemTupleItemsList)
else:
#Define
DoneKwargDict={}
#map
TypeClassesList=map(
lambda __DoneKeyStr:
SYS.getTypeClassWithTypeStr(
SYS.getTypeStrWithKeyStr(__DoneKeyStr)
)
if getattr(_InstanceVariable,__DoneKeyStr)==None
else None.__class__,
DoClass.DoingAttributeVariablesOrderedDict.keys(
)+DoClass.DoneAttributeVariablesOrderedDict.keys()
)
#debug
'''
print('TypeClassesList is '+str(TypeClassesList))
print('')
'''
#set in the instance
map(
lambda __DoneKeyStr,__TypeClass:
setattr(
_InstanceVariable,
__DoneKeyStr,
__TypeClass()
)
if __TypeClass!=None.__class__
else None,
DoClass.DoingAttributeVariablesOrderedDict.keys(
)+DoClass.DoneAttributeVariablesOrderedDict.keys(),
TypeClassesList
)
#debug
'''
print('Doer l.274 we are going to call the DoWrapMethod')
print('DoWrapMethod is ',DoWrapMethod)
print('')
'''
#Return the call of the defined do method
if len(DoneKwargDict)>0:
return DoWrapUnboundMethod(
_InstanceVariable,
*_LiargVariablesList,
**DoneKwargDict
)
else:
return DoWrapUnboundMethod(
_InstanceVariable,
*_LiargVariablesList
)
#</DefineFunctions>
#<DefineClass>
@DecorationClass()
class DoerClass(BaseClass):
def default_init(self,
_DoClass=None,
_DoingGetBool=False,
**_KwargVariablesDict
):
#Call the parent init method
BaseClass.__init__(self,**_KwargVariablesDict)
def __call__(self,_Class):
#debug
'''
print('Doer l.247 __call__ method')
print('_Class is ',_Class)
print('')
'''
#Call the parent init method
BaseClass.__call__(self,_Class)
#Do
self.do(_Class)
#Debug
'''
print('do is done')
print('')
'''
#Return
return _Class
def do(self,_Class):
#set
self.DoClass=_Class
#debug
'''
print("Doer l.337 : self.DoClass is ",self.DoClass)
print('')
'''
#alias
DoClass=self.DoClass
#Definition
DoerStr=DoClass.NameStr
DoStr=DoerStrToDoStrOrderedDict[DoerStr]
DoMethodStr=DoStr[0].lower()+DoStr[1:] if DoStr[0]!='_' else '_'+DoStr[1].lower()+DoStr[2:]
DoneStr=DoStrToDoneStrOrderedDict[DoStr]
DoingStr=DoneStrToDoingStrOrderedDict[DoneStr]
LocalVariablesDict=vars()
#debug
print('Doer l.132 : DoerStr is '+DoerStr)
print('DoStr is '+DoStr)
print('DoMethodStr is '+DoMethodStr)
print('DoingStr is '+DoingStr)
print('DoneStr is '+DoneStr)
print('')
#set
map(
lambda __KeyStr:
setattr(DoClass,__KeyStr,LocalVariablesDict[__KeyStr]),
['DoerStr','DoStr','DoneStr','DoingStr','DoMethodStr']
)
#set a list that will contain the temporary setting items during a call of the <do> method in the instance
DoClass.DoHistoryOrderedDict=collections.OrderedDict()
#Check
if hasattr(DoClass,'DefaultAttributeItemTuplesList'):
#Debug
'''
print('Doer l.383')
print('DoClass.DefaultAttributeItemTuplesList is ',_Class.DefaultAttributeItemTuplesList)
print('')
'''
#Check for doing and done keyStrs
DoClass.DoneAttributeVariablesOrderedDict=collections.OrderedDict(
SYS._filter(
lambda __DefaultAttributeItemTuple:
__DefaultAttributeItemTuple[0].startswith(DoneStr),
DoClass.DefaultAttributeItemTuplesList
)
)
DoClass.DoingAttributeVariablesOrderedDict=collections.OrderedDict(
SYS._filter(
lambda __DefaultAttributeItemTuple:
__DefaultAttributeItemTuple[0].startswith(DoingStr),
DoClass.DefaultAttributeItemTuplesList
)
)
#Definition
DoWrapMethodStr=DoingWrapPrefixStr+DoMethodStr
#Debug
'''
print('Doer l.401')
print('DoClass.DoneAttributeVariablesOrderedDict is ',DoClass.DoneAttributeVariablesOrderedDict)
print('DoClass.DoingAttributeVariablesOrderedDict is ',DoClass.DoingAttributeVariablesOrderedDict)
print('DoWrapMethodStr is ',DoWrapMethodStr)
print('')
'''
#Check
if hasattr(DoClass,DoWrapMethodStr):
#Debug
'''
print('There is a DoWrapMethod here already')
print('')
'''
#Get
DoWrapMethod=getattr(
DoClass,
DoWrapMethodStr
)
else:
#Debug
'''
print('There is no DoWrapMethod here')
print('')
'''
#Definition of a default function
DoWrapMethod=DefaultDoFunction
#debug
'''
print('DoWrapMethod is '+str(DoWrapMethod))
print('')
'''
#Link
"""
DoingMethodKeyStr='init'+DoClass.NameStr
setattr(
DoClass,
DoingMethodKeyStr,
initDo
)
"""
#Definition of the ExecStr that will define the function
DoDecorationMethodStr=DoingDecorationPrefixStr+DoingDecorationTagStr+DoingDecorationSuffixStr+DoMethodStr
DoExecStr="def "+DoDecorationMethodStr+"(_InstanceVariable,"
DoExecStr+=",".join(
map(
lambda __KeyStr:
DoingAttributePrefixStr+__KeyStr+"=None",
DoClass.DoingAttributeVariablesOrderedDict.keys()
)
)
DoExecStr+="," if DoExecStr[-1]!="," else ""
DoExecStr+="*_LiargVariablesList,"
DoExecStr+="**_KwargVariablesDict):\n\t"
#Debug part
#DoExecStr+='\n\tprint("In '+DoDecorationMethodStr+' with '+DoWrapMethod.__name__+' ") '
'''
DoExecStr+="\n\t#Debug"
DoExecStr+=('\n\t'+';\n\t'.join(
map(
lambda __KeyStr:
'print("In DoerFunction, '+DoingAttributePrefixStr+__KeyStr+' is ",'+DoingAttributePrefixStr+__KeyStr+')',
_Class.DoingAttributeVariablesOrderedDict.keys()
)
)+";") if len(_Class.DoingAttributeVariablesOrderedDict.keys())>0 else ''
DoExecStr+='\n\tprint("_LiargVariablesList is ",_LiargVariablesList);'
DoExecStr+='\n\tprint("_KwargVariablesDict is ",_KwargVariablesDict);\n\t'
'''
#Set the doing variables
"""
DoExecStr+="\n\t#set the doing variables"
DoExecStr+="\n\tDoHistoryOrderedDict=_InstanceVariable.__class__.DoHistoryOrderedDict"
DoExecStr+="\n\tif '"+DoDecorationMethodStr+"' not in DoHistoryOrderedDict:DoHistoryOrderedDict['"+DoDecorationMethodStr+"']=SYS.collections.OrderedDict()"
DoExecStr+="\n\tDoneSpecificAttributesOrderedDict=DoHistoryOrderedDict['"+DoDecorationMethodStr+"']"
DoExecStr+=("\n"+";\n".join(
map(
lambda __KeyStr:
"\n".join(
[
"\tif "+DoingAttributePrefixStr+__KeyStr+"!=None:",
"\t\t_InstanceVariable."+__KeyStr+"="+DoingAttributePrefixStr+__KeyStr,
"\t\tDoneSpecificAttributesOrderedDict['"+__KeyStr+"']="+DoingAttributePrefixStr+__KeyStr,
"\telse:",
"\t\tDoneSpecificAttributesOrderedDict['"+__KeyStr+"']=None"
]
),
DoClass.DoingAttributeVariablesOrderedDict.keys()
)
)+";\n") if len(
DoClass.DoingAttributeVariablesOrderedDict.keys()
)>0 else ''
"""
DoExecStr+=("\n"+";\n".join(
map(
lambda __KeyStr:
"\n".join(
[
"\tif "+DoingAttributePrefixStr+__KeyStr+"!=None:",
"\t\t_InstanceVariable."+__KeyStr+"="+DoingAttributePrefixStr+__KeyStr,
]
),
DoClass.DoingAttributeVariablesOrderedDict.keys()
)
)+";\n") if len(
DoClass.DoingAttributeVariablesOrderedDict.keys()
)>0 else ''
#Give to the class this part (it can serve after for imitating methods...)
DoExecStrKeyStr='Do'+DoClass.NameStr+'ExecStr'
setattr(DoClass,DoExecStrKeyStr,DoExecStr)
#Call the initDo method
DoExecStr+="\n" if DoExecStr[-1]!="\n" else ""
DoExecStr+="\n\t#return\n\t"
#Check
setattr(DoClass,'DoingGetBool',self.DoingGetBool)
if self.DoingGetBool==False:
#Return the _InstanceVariable if it is not a getter object
DoExecStr+="do(_InstanceVariable,"
DoExecStr+="*_LiargVariablesList,"
DoExecStr+="**dict(_KwargVariablesDict,**{'DoDecorationMethodStr':'"+DoDecorationMethodStr+"','DoClassStr':'"+DoClass.__name__+"'}))\n\t"
DoExecStr+="return _InstanceVariable\n"
else:
#Return the output of the do method
DoExecStr+="return DoFunction(_InstanceVariable,"
DoExecStr+="*_LiargVariablesList,"
DoExecStr+="**dict(_KwargVariablesDict,**{'DoDecorationMethodStr':'"+DoDecorationMethodStr+"','DoClassStr':'"+DoClass.__name__+"'}))\n"
#debug
'''
print('Doer l 403')
print('DoExecStr is ')
print(DoExecStr)
print('')
'''
#exec
six.exec_(DoExecStr)
#set
#locals(
# )[DoDecorationMethodStr].DoWrapMethod=DoWrapMethod
#Debug
'''
print('l. 907 Doer')
print('DoClass is ',DoClass)
print('DoDecorationMethodStr is ',DoDecorationMethodStr)
print('DoWrapMethod is ',DoWrapMethod)
print("locals()[DoDecorationMethodStr] is ",locals()[DoDecorationMethodStr])
print('')
'''
#set with the specific name
setattr(
DoClass,
DoDecorationMethodStr,
locals()[DoDecorationMethodStr]
)
#set with the DoMethodStr shortcut
setattr(
DoClass,
DoMethodStr,
locals()[DoDecorationMethodStr]
)
#Add to the KeyStrsList
DoClass.KeyStrsList+=[
'DoerStr',
'DoStr',
'DoneStr',
'DoingStr',
'DoneAttributeVariablesOrderedDict',
'DoingAttributeVariablesOrderedDict',
DoExecStrKeyStr,
'DoingGetBool',
'DoTempAttributeItemTuplesList',
'DoTempNotAttributeItemTupleItemsList'
]
#</DefineClass>
#<DefineLocals>
DoStrsTuplesList=[
('Doer','Do','Doing','Done'),
('Deriver','Derive','Deriving','Derived'),
('Propertiser','Propertize','Propertizing','Propertized'),
('Inspecter','Inspect','Inspecting','Inspected'),
('Representer','Represent','Representing','Represented'),
('Printer','_Print','Printing','Printed'),
('Debugger','Debug','Debugging','Debugged'),
('Functer','Funct','Functing','Functed'),
('Moduler','Module','Moduling','Moduled'),
('Attester','Attest','Attesting','Attested'),
('Tester','Test','Testing','Tested'),
('Hooker','Hook','Hooking','Hooked'),
('Conditioner','Condition','Conditioning','Conditioned'),
('Concluder','Conclude','Concluding','Concluded'),
('Observer','Observe','Observing','Observed'),
('Binder','Bind','Binding','Binded'),
('Switcher','Switch','Switching','Switched'),
('Resetter','Reset','Resetting','Resetted'),
('Caller','Call','Calling','Called'),
('Cloner','Clone','Cloning','Cloned'),
('Watcher','Watch','Watching','Watched'),
('Classer','_Class','Classing','Classed'),
('Argumenter','Argument','Argumenting','Argumented'),
('Imitater','Imitate','Imitating','Imitated'),
('Alerter','Alert','Alerting','Alerted'),
('Interfacer','Interface','Interfacing','Interfaced'),
('Folderer','Folder','Foldering','Foldered'),
('Filer','File','Filing','Filed'),
('Closer','Close','Closing','Closed'),
('Loader','Load','Loading','Loaded'),
('Writer','Write','Writing','Writed'),
('Capturer','Capture','Capturing','Captured'),
('Processer','Process','Processing','Processed'),
('Statuser','Status','Statusing','Statused'),
('Killer','Kill','Killing','Killed'),
('Directer','Direct','Directing','Directed'),
('Hdformater','Hdformat','Hdformating','Hdformated'),
('Guider','Guide','Guiding','Guided'),
('Scriptbooker','Scriptbook','Scriptbooking','Scriptbooked'),
('Celler','Cell','Celling','Celled'),
('Notebooker','Notebook','Notebooking','Notebooked'),
('Markdowner','Markdown','Markdowning','Markdowned'),
('Readmer','Readme','Readming','Readmed'),
('Installer','Install','Installing','Installed'),
('Documenter','Document','Documenting','Documented'),
('Itemizer','Itemize','Itemizing','Itemized'),
('Getter','Get','Getting','Getted'),
('Setter','Set','Setting','Setted'),
('Deleter','Delete','Deleting','Deleted'),
('Attributer','Attribute','Attributing','Attributed'),
('Restricter','Restrict','Restricting','Restricted'),
('Pather','Path','Pathing','Pathed'),
('Sharer','Share','Sharing','Shared'),
('Executer','Execute','Executing','Executed'),
('Pointer','Point','Pointing','Pointed'),
('Applyier','Apply','Applying','Applied'),
('Mapper','Map','Mapping','Mapped'),
('Picker','Pick','Picking','Picked'),
('Gatherer','Gather','Gathering','Gathered'),
('Updater','Update','Updating','Updated'),
('Linker','Link','Linking','Linked'),
('Weaver','Weave','Weaving','Weaved'),
('Filterer','Filter','Filtering','Filtered'),
('Noder','Node','Noding','Noded'),
('Outputer','Output','Outputing','Outputed'),
('Appender','Append','Appending','Appended'),
('Instancer','Instance','Instancing','Instanced'),
('Adder','Add','Adding','Added'),
('Distinguisher','Distinguish','Distinguishing','Distinguished'),
('Parenter','Parent','Parenting','Parented'),
('Storer','Store','Storing','Stored'),
('Pusher','Push','Pushing','Pushed'),
('Producer','Produce','Producing','Produced'),
('Catcher','Catch','Catching','Catched'),
('Attentioner','Attention','Attentioning','Attentioned'),
('Coupler','Couple','Coupling','Coupled'),
('Settler','Settle','Settling','Settled'),
('Commander','Command','Commanding','Commanded'),
('Walker','Walk','Walking','Walked'),
('Collecter','Collect','Collecting','Collected'),
('Visiter','Visit','Visiting','Visited'),
('Recruiter','Recruit','Recruiting','Recruited'),
('Mobilizer','Mobilize','Mobilizing','Mobilized'),
('Router','Route','Routing','Routed'),
('Grabber','Grab','Grabbing','Grabbed'),
('Poker','Poke','Poking','Poked'),
('Connecter','Connect','Connecting','Connected'),
('Networker','Network','Networking','Networked'),
('Grouper','Group','Grouping','Grouped'),
('Structurer','Structure','Structuring','Structured'),
('Saver','Save','Saving','Saved'),
('Databaser','Database','Databasing','Databased'),
('Modeler','Model','Modeling','Modeled'),
('Tabularer','Tabular','Tabularing','Tabulared'),
('Tabler','Table','Tabling','Tabled'),
('Rower','Row','Rowing','Rowed'),
('Inserter','Insert','Inserting','Inserted'),
('Retriever','Retrieve','Retrieving','Retrieved'),
('Findoer','Find','Finding','Found'),
('Recoverer','Recover','Recovering','Recovered'),
('Shaper','Shape','Shaping','Shaped'),
('Merger','Merge','Merging','Merged'),
('Scanner','Scan','Scanning','Scanned'),
('Joiner','Join','Joining','Joined'),
('Hierarchizer','Hierarchize','Hierarchizing','Hierarchized'),
('Analyzer','Analyze','Analyzing','Analyzed'),
('Grider','Grid','Griding','Grided'),
('Controller','Control','Controlling','Controlled'),
('Featurer','Feature','Featuring','Featured'),
('Recuperater','Recuperate','Recuperating','Recuperated'),
('Ploter','Plot','Ploting','Ploted'),
('Axer','Axe','Axing','Axed'),
('Paneler','Panel','Paneling','Paneled'),
('Figurer','Figure','Figuring','Figured'),
('Pyploter','Pyplot','Pyploting','Pyploted'),
('Multiplier','Multiply','Multiplying','Multiplied'),
('Sumer','Sum','Suming','Sumed'),
('Modulizer','Modulize','Modulizing','Modulized'),
('Simulater','Simulate','Simulating','Simulated'),
('Runner','Run','Running','Runned'),
('Moniter','Monit','Monitering','Monitered'),
('Populater','Populate','Populating','Populated'),
('Dynamizer','Dynamize','Dynamizing','Dynamized'),
('Rater','Rate','Rating','Rated'),
('Brianer','Brian','Brianing','Brianed'),
('Muziker','Muzik','Muziking','Muziked'),
('Vexflower','Vexflow','Vexflowing','Vexflowed'),
('Permuter','Permute','Permuting','Permuted'),
('Differenciater','Differenciate','Differenciating','Differenciated'),
('Pooler','Pool','Pooling','Pooled'),
('Harmonizer','Harmonize','Harmonizing','Harmonized'),
('Maker','Make','Making','Made'),
('Builder','Build','Building','Built'),
('Incrementer','Increment','Incrementing','Incremented'),
('Mimicker','Mimic','Mimicking','Mimicked'),
('Blocker','Block','Blocking','Blocked'),
('Cumulater','Cumulate','Cumulating','Cumulated')
]
DoerStrToDoStrOrderedDict=SYS.dictify(DoStrsTuplesList,0,1)
DoStrToDoerStrOrderedDict=SYS.dictify(DoStrsTuplesList,1,0)
DoStrToDoingStrOrderedDict=SYS.dictify(DoStrsTuplesList,1,2)
DoStrToDoneStrOrderedDict=SYS.dictify(DoStrsTuplesList,1,3)
DoneStrToDoingStrOrderedDict=SYS.dictify(DoStrsTuplesList,3,2)
#</DefineLocals>
|
Ledoux/ShareYourSystem
|
Pythonlogy/ShareYourSystem/Standards/Classors/Doer/Drafts/__init__ copy 3.py
|
Python
|
mit
| 19,047
|
[
"Brian",
"VisIt"
] |
cb0d8b0407be862cc0aced62f931f15696f92ca7a2dffd08d5b0e379c4c6b48e
|
"""
The diags_file contains the hits that dagchainer found; the all_file is the full list of BLAST hits.
This script goes through all of the hits in the dag file and adds any hit from the all_file that is within 'dist' of
any hit in the diag.
"""
from rtree import Rtree
import sys
import os
import psyco; psyco.full()
"""
6 50
rice_1 1||36212110||36215209||OS01G61990||-1||CDS 36215209 36212110 sorghum_1 1||62493571||62496164||SB01G039040||-1||CDS 62496164 62493571 4.000000e-18 52
rice_1 1||36223907||36227093||OS01G62020||1||CDS 36223907 36227093 sorghum_1 1||62517465||62520208||SB01G039050||1||CDS 62517465 62520208 1.000000e-49 95
rice_1 1||36239128||36239914||OS01G62060||1||CDS 36239128 36239914 sorghum_1 1||62554416||62554908||SB01G039100||-1||CDS 62554908 62554416 2.000000e-10 96
rice_1 1||36293042||36293695||OS01G62130||-1||CDS 36293695 36293042 sorghum_1 1||62652716||62653309||SB01G039190||1||CDS 62652716 62653309 5.000000e-20 88
rice_1 1||36341022||36341441||OS01G62230||-1||CDS 36341441 36341022 sorghum_1 1||62699344||62699790||SB01G039260||-1||CDS 62699790 62699344 2.000000e-98 126
rice_1 1||36366694||36369819||OS01G62290||1||CDS 36366694 36369819 sorghum_1 1||62807491||62810206||SB01G039390||1||CDS 62807491 62810206 1.000000e-250 146
"""
def read_dag_to_tree(all_hits):
"""create an rtree, using query as x, subject as y
do this for all hits, then for each diag, do an intersection
(+ buffer) to find nearby hits
"""
gdxs = {}
for i, sline in enumerate(open(all_hits)):
if sline[0] == '#': continue
line = sline[:-1].split("\t")
chrs = tuple(sorted([line[0], line[4]]))
# so save the index, which will return i when queried
# and associate i with the text line via the dict.
if not chrs in gdxs: gdxs[chrs] = ({}, Rtree())
q0, q1 = sorted(map(int, line[2:4]))
s0, s1 = sorted(map(int, line[6:8]))
assert q0 < q1 and s0 < s1
gdxs[chrs][1].add(i, (q0, s0, q1, s1))
gdxs[chrs][0][i] = sline
return gdxs
def main(dist, diags, all_hits):
"""empty docstring"""
gdxs = read_dag_to_tree(all_hits)
seen = {}
for sline in open(diags):
# reset seen for each new diagonal...
if sline[0] == '#': seen = {}; print sline.strip(); continue
line = sline[:-1].split("\t")
chrs = (line[0], line[4])
info_lines, tree = gdxs[chrs]
q0, q1 = sorted(map(int, line[2:4]))
s0, s1 = sorted(map(int, line[6:8]))
assert q0 < q1 and s0 < s1
idxs = tree.intersection((q0 - dist, s0 - dist, q1 + dist, s1 + dist))
seen[(line[1], line[5])] = True
print sline,
for i in idxs:
iline = info_lines[i]
# info_lines stores raw tab-separated lines, so split into fields before keying
ifields = iline[:-1].split("\t")
ikey = (ifields[1], ifields[5])
if ikey in seen: continue
seen[ikey] = True
print iline,
if __name__ == "__main__":
import optparse
parser = optparse.OptionParser()
parser.add_option("-d", "--dist", dest="dist", help="distance around each pair to look for missed pairs")
parser.add_option("--diags", dest="diags", help="the dag aligncoords file (something like q_s.dag.nodups.filter.aligncoords")
parser.add_option("--all", dest="all", help="the dag blast hit file containing all hits of q to s (something like q_s.dag.nodups")
(options, _) = parser.parse_args()
if not (options.dist and options.diags and options.all):
sys.exit(parser.print_help())
main(int(options.dist), options.diags, options.all)
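# A minimal sketch of the Rtree pattern used above, with made-up coordinates:
# boxes are inserted as (minx, miny, maxx, maxy) under an integer id, and
# intersection() yields the ids of all boxes overlapping the query window.
#
# tree = Rtree()
# tree.add(0, (10, 10, 20, 20))
# tree.add(1, (100, 100, 110, 110))
# list(tree.intersection((0, 0, 50, 50)))  # -> [0]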
|
LyonsLab/coge
|
bin/dagchainer/find_nearby.py
|
Python
|
bsd-2-clause
| 3,684
|
[
"BLAST"
] |
b5272fc21177503fcc1626d8e94a47320c97c16a4371cddc2647aa449dbd645f
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
from pymatgen.analysis.elasticity.elastic import ElasticTensor
from pymatgen.analysis.interfaces.substrate_analyzer import SubstrateAnalyzer
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.testing import PymatgenTest
class SubstrateAnalyzerTest(PymatgenTest):
# Clean up test to be based on test structures
def test_init(self):
# Film VO2
film = SpacegroupAnalyzer(self.get_structure("VO2"), symprec=0.1).get_conventional_standard_structure()
# Substrate TiO2
substrate = SpacegroupAnalyzer(self.get_structure("TiO2"), symprec=0.1).get_conventional_standard_structure()
film_elac = ElasticTensor.from_voigt(
[
[324.32, 187.3, 170.92, 0.0, 0.0, 0.0],
[187.3, 324.32, 170.92, 0.0, 0.0, 0.0],
[170.92, 170.92, 408.41, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 150.73, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 150.73, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 238.74],
]
)
s = SubstrateAnalyzer()
matches = list(s.calculate(film, substrate, film_elac))
self.assertEqual(len(matches), 192)
for match in matches:
assert match is not None
assert isinstance(match.match_area, float)
if __name__ == "__main__":
unittest.main()
|
vorwerkc/pymatgen
|
pymatgen/analysis/interfaces/tests/test_substrate_analyzer.py
|
Python
|
mit
| 1,474
|
[
"pymatgen"
] |
583f89faf1c018a17a2a14cdeaee669e4afe17fefbfd85a94949ea9e04520090
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import poisson, norm, uniform
from scipy.integrate import trapz
from am_bda import get_pdf_quantiles
NPTS = 100
NSIM = 1000
year = np.arange(10)+1976
accid = np.array([24, 25, 31, 31, 22, 21, 26, 20, 16, 22])
deaths = np.array([734, 516, 754, 877, 814, 362, 764, 809, 223, 1066])
drate = np.array([0.19, 0.12, 0.15, 0.16, 0.14, 0.06, 0.13, 0.13, 0.03, 0.15])
pmiles = deaths/drate * 100e6
# number of accidents is a Poisson distribution with mean theta
def P_yi_theta(yi, theta):
return poisson.pmf(yi, mu=theta)
# likelihood of y_values given theta
def likelihood(y_values, theta):
p = np.empty(len(y_values), dtype=float)
for i in xrange(len(y_values)):
p[i] = P_yi_theta(y_values[i], theta)
return np.prod(p)
# start with a gaussian prior for theta
#theta_prior = norm(np.average(accid), np.std(accid))
theta_prior = uniform(600, 200)
theta = np.linspace(theta_prior.ppf(0.001), theta_prior.ppf(0.999), NPTS)
# post_unnormalized
u_post = np.empty(NPTS, dtype=float)
for i in xrange(NPTS):
u_post[i] = theta_prior.pdf(theta[i]) * likelihood(deaths, theta[i])
# norm factor
Z = trapz(u_post, theta)
# posterior
theta_post = u_post / Z
# get approximate posterior
mu = trapz(theta * theta_post, theta)
var = trapz((theta-mu)**2 * theta_post, theta)
theta_approx = norm(mu, np.sqrt(var))
# simulate NSIM values of theta and y
ysim = np.empty(NSIM, dtype=int)
for i in xrange(NSIM):
th = theta_approx.rvs()
ysim[i] = poisson.rvs(mu = th)
# get quantiles
yi_out = np.percentile(ysim, [2.5, 97.5])
#yi_out, pr_out = get_pdf_quantiles(y_post, y_range, [0.025, 0.975])
print yi_out
# plots
fig, axes = plt.subplots(1, 2)
plt.sca(axes[0])
plt.plot(theta, theta_prior.pdf(theta), label='prior')
plt.plot(theta, theta_post, label='posterior')
plt.xlabel('theta')
plt.ylabel('P(theta) or P(theta | data)')
plt.legend()
plt.sca(axes[1])
plt.hist(ysim)
plt.vlines(yi_out, 0, 100, 'k', lw=2, label='95% interval')
plt.xlabel('y_1986')
plt.ylabel('Hist(y_1986)')
plt.legend()
plt.show()
plt.close()
|
amaggi/bda
|
chapter_02/ex_12.py
|
Python
|
gpl-2.0
| 2,104
|
[
"Gaussian"
] |
85e5f80be9a5a4e615dfd996e2f2c4cdc670202dbb045fc790dad54d08af4088
|
import os
from setuptools import setup
os.chdir(os.path.abspath(os.path.dirname(__file__)))
packages = []
for rootdir, dirs, files in os.walk('vistrails'):
if '__init__.py' in files:
packages.append(rootdir.replace('\\', '.').replace('/', '.'))
def list_files(d, root):
files = []
for e in os.listdir(os.path.join(root, d)):
if os.path.isdir(os.path.join(root, d, e)):
files.extend(list_files('%s/%s' % (d, e), root))
elif not e.endswith('.pyc'):
files.append('%s/%s' % (d, e))
return files
package_data = {
'vistrails.core.collection': ['schema.sql', 'test.db'],
'vistrails.core': list_files('resources', 'vistrails/core'),
'vistrails.db': ['specs/all.xml'],
'vistrails.gui': list_files('resources/images', 'vistrails/gui') + ['resources/vistrails-mime.xml'],
'vistrails.packages.analytics': ['*.vt'], # FIXME : what is this?
'vistrails.packages.CLTools': ['icons/*.png', 'test_files/*'],
'vistrails.packages.persistence': ['schema.sql'],
'vistrails.packages.tabledata': ['test_files/*'],
'vistrails.tests': list_files('resources', 'vistrails/tests'),
}
for version in os.listdir('vistrails/db/versions'):
if not version.startswith('v'):
continue
package_data['vistrails.db.versions.%s' % version] = [
'schemas/sql/vistrails.sql',
'schemas/sql/vistrails_drop.sql',
'schemas/xml/log.xsd',
'schemas/xml/vistrail.xsd',
'schemas/xml/vtlink.xsd',
'schemas/xml/workflow.xsd',
'specs/all.xml',
]
description = """
VisTrails is an open-source data analysis and visualization tool. It provides a comprehensive provenance infrastructure that maintains detailed history information about the steps followed and data derived in the course of an exploratory task: VisTrails maintains provenance of data products, of the computational processes that derive these products and their executions.
For more information, take a look at the `documentation <http://www.vistrails.org/index.php/Documentation>`_, the `users guide <http://www.vistrails.org/usersguide/v2.0/html/>`_, or our `publications <http://www.vistrails.org/index.php/Publications,_Tutorials_and_Presentations>`_.
Binary releases are available on our `download <http://www.vistrails.org/index.php/Downloads>`_ page. To report bugs, please use the github `issue tracker <https://github.com/VisTrails/VisTrails/issues>`_, after checking our `FAQ <http://www.vistrails.org/index.php/FAQ>`_ for known issues.
Homepage: http://www.vistrails.org
Who we are: http://www.vistrails.org/index.php/People
"""
setup(name='vistrails',
version='2.2',
packages=packages,
package_data=package_data,
entry_points={
'console_scripts': [
'vistrails = vistrails.run:main']},
zip_safe=False,
install_requires=[
# 'PyQt<5.0',
'numpy',
'scipy',
'certifi',
'backports.ssl_match_hostname'],
description='Data analysis and visualization tool',
author="New York University",
author_email='vistrails-dev@vistrails.org',
url='http://www.vistrails.org/',
long_description=description,
license='BSD',
keywords=['vistrails', 'provenance', 'visualization', 'vtk', 'nyu',
'matplotlib', ],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 2 :: Only',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Visualization'])
|
Nikea/VisTrails
|
setup.py
|
Python
|
bsd-3-clause
| 3,886
|
[
"VTK"
] |
be6c4ace14fda3fceb8c0402cbef720d4a4e3af5e4fa85233dac9b82eccf19f7
|
"""
Core visualization operations based on PyVista.
Actual implementation of _Renderer and _Projection classes.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Guillaume Favelier <guillaume.favelier@gmail.com>
#
# License: Simplified BSD
import vtk
import pyvista
import warnings
import numpy as np
from .base_renderer import _BaseRenderer
from ...utils import copy_base_doc_to_subclass_doc
class _Projection(object):
"""Class storing projection information.
Attributes
----------
xy : array
Result of 2d projection of 3d data.
pts : None
Scene sensors handle.
"""
def __init__(self, xy=None, pts=None):
"""Store input projection information into attributes."""
self.xy = xy
self.pts = pts
def visible(self, state):
"""Modify visibility attribute of the sensors."""
self.pts.SetVisibility(state)
@copy_base_doc_to_subclass_doc
class _Renderer(_BaseRenderer):
"""Class managing rendering scene.
Attributes
----------
plotter: pyvista.Plotter
Main PyVista access point.
off_screen: bool
State of the offscreen.
name: str
Name of the window.
"""
def __init__(self, fig=None, size=(600, 600), bgcolor=(0., 0., 0.),
name="PyVista Scene", show=False):
from mne.viz.backends.renderer import MNE_3D_BACKEND_TEST_DATA
self.off_screen = False
self.name = name
if MNE_3D_BACKEND_TEST_DATA:
self.off_screen = True
if fig is None:
self.plotter = pyvista.Plotter(
window_size=size, off_screen=self.off_screen)
self.plotter.background_color = bgcolor
# this is a hack to avoid using a deleted ren_win
self.plotter._window_size = size
else:
# import basic properties
self.plotter = pyvista.Plotter(
window_size=fig._window_size, off_screen=fig.off_screen)
# import background
self.plotter.background_color = fig.background_color
# import actors
for actor in fig.renderer.GetActors():
self.plotter.renderer.AddActor(actor)
# import camera
self.plotter.camera_position = fig.camera_position
self.plotter.reset_camera()
def scene(self):
return self.plotter
def set_interactive(self):
self.plotter.enable_terrain_style()
def mesh(self, x, y, z, triangles, color, opacity=1.0, shading=False,
backface_culling=False, **kwargs):
vertices = np.c_[x, y, z]
n_triangles = len(triangles)
triangles = np.c_[np.full(n_triangles, 3), triangles]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
pd = pyvista.PolyData(vertices, triangles)
self.plotter.add_mesh(mesh=pd, color=color, opacity=opacity,
backface_culling=backface_culling)
def contour(self, surface, scalars, contours, line_width=1.0, opacity=1.0,
vmin=None, vmax=None, colormap=None):
from matplotlib import cm
from matplotlib.colors import ListedColormap
if colormap is None:
cmap = cm.get_cmap('coolwarm')
else:
cmap = ListedColormap(colormap / 255.0)
vertices = np.array(surface['rr'])
triangles = np.array(surface['tris'])
n_triangles = len(triangles)
triangles = np.c_[np.full(n_triangles, 3), triangles]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
pd = pyvista.PolyData(vertices, triangles)
pd.point_arrays['scalars'] = scalars
self.plotter.add_mesh(pd.contour(isosurfaces=contours,
rng=(vmin, vmax)),
show_scalar_bar=False,
line_width=line_width,
cmap=cmap,
opacity=opacity)
def surface(self, surface, color=None, opacity=1.0,
vmin=None, vmax=None, colormap=None, scalars=None,
backface_culling=False):
from matplotlib import cm
from matplotlib.colors import ListedColormap
if colormap is None:
cmap = cm.get_cmap('coolwarm')
else:
cmap = ListedColormap(colormap / 255.0)
vertices = np.array(surface['rr'])
triangles = np.array(surface['tris'])
n_triangles = len(triangles)
triangles = np.c_[np.full(n_triangles, 3), triangles]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
pd = pyvista.PolyData(vertices, triangles)
if scalars is not None:
pd.point_arrays['scalars'] = scalars
self.plotter.add_mesh(mesh=pd, color=color,
rng=[vmin, vmax],
show_scalar_bar=False,
opacity=opacity,
cmap=cmap,
backface_culling=backface_culling)
def sphere(self, center, color, scale, opacity=1.0,
resolution=8, backface_culling=False):
sphere = vtk.vtkSphereSource()
sphere.SetThetaResolution(resolution)
sphere.SetPhiResolution(resolution)
sphere.Update()
geom = sphere.GetOutput()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
pd = pyvista.PolyData(center)
self.plotter.add_mesh(pd.glyph(orient=False, scale=False,
factor=scale, geom=geom),
color=color, opacity=opacity,
backface_culling=backface_culling)
def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8,
glyph_height=None, glyph_center=None, glyph_resolution=None,
opacity=1.0, scale_mode='none', scalars=None,
backface_culling=False):
factor = scale
vectors = np.c_[u, v, w]
points = np.vstack(np.c_[x, y, z])
n_points = len(points)
offset = np.arange(n_points) * 3
cell_type = np.full(n_points, vtk.VTK_VERTEX)
cells = np.c_[np.full(n_points, 1), range(n_points)]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
grid = pyvista.UnstructuredGrid(offset, cells, cell_type, points)
grid.point_arrays['vec'] = vectors
if scale_mode == "scalar":
grid.point_arrays['mag'] = np.array(scalars)
scale = 'mag'
else:
scale = False
if mode == "arrow":
self.plotter.add_mesh(grid.glyph(orient='vec',
scale=scale,
factor=factor),
color=color,
opacity=opacity,
backface_culling=backface_culling)
elif mode == "cone":
cone = vtk.vtkConeSource()
if glyph_height is not None:
cone.SetHeight(glyph_height)
if glyph_center is not None:
cone.SetCenter(glyph_center)
if glyph_resolution is not None:
cone.SetResolution(glyph_resolution)
cone.Update()
geom = cone.GetOutput()
self.plotter.add_mesh(grid.glyph(orient='vec',
scale=scale,
factor=factor,
geom=geom),
color=color,
opacity=opacity,
backface_culling=backface_culling)
elif mode == "cylinder":
cylinder = vtk.vtkCylinderSource()
cylinder.SetHeight(glyph_height)
cylinder.SetCenter(glyph_center)
cylinder.SetResolution(glyph_resolution)
cylinder.Update()
# fix orientation
tr = vtk.vtkTransform()
tr.RotateWXYZ(90, 0, 0, 1)
trp = vtk.vtkTransformPolyDataFilter()
trp.SetInputData(cylinder.GetOutput())
trp.SetTransform(tr)
trp.Update()
geom = trp.GetOutput()
self.plotter.add_mesh(grid.glyph(orient='vec',
scale=scale,
factor=factor,
geom=geom),
color=color,
opacity=opacity,
backface_culling=backface_culling)
def text(self, x, y, text, width, color=(1.0, 1.0, 1.0)):
self.plotter.add_text(text, position=(x, y),
font_size=int(width * 100),
color=color)
def show(self):
self.plotter.show(title=self.name)
def close(self):
self.plotter.close()
def set_camera(self, azimuth=0.0, elevation=0.0, distance=1.0,
focalpoint=(0, 0, 0)):
phi = _deg2rad(azimuth)
theta = _deg2rad(elevation)
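# Spherical-to-Cartesian conversion: phi is the azimuth in the x-y plane,
# theta the polar angle measured from the +z axis.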
position = [
distance * np.cos(phi) * np.sin(theta),
distance * np.sin(phi) * np.sin(theta),
distance * np.cos(theta)]
self.plotter.camera_position = [
position, focalpoint, [0, 0, 1]]
self.plotter.reset_camera()
def screenshot(self):
return self.plotter.screenshot()
def project(self, xyz, ch_names):
xy = _3d_to_2d(self.plotter, xyz)
xy = dict(zip(ch_names, xy))
# pts = self.fig.children[-1]
pts = self.plotter.renderer.GetActors().GetLastItem()
return _Projection(xy=xy, pts=pts)
def _deg2rad(deg):
from numpy import pi
return deg * pi / 180.
def _mat_to_array(vtk_mat):
e = [vtk_mat.GetElement(i, j) for i in range(4) for j in range(4)]
arr = np.array(e, dtype=float)
arr.shape = (4, 4)
return arr
def _3d_to_2d(plotter, xyz):
size = plotter.window_size
xyz = np.column_stack([xyz, np.ones(xyz.shape[0])])
# Transform points into 'unnormalized' view coordinates
comb_trans_mat = _get_world_to_view_matrix(plotter)
view_coords = np.dot(comb_trans_mat, xyz.T).T
# Divide through by the fourth element for normalized view coords
norm_view_coords = view_coords / (view_coords[:, 3].reshape(-1, 1))
# Transform from normalized view coordinates to display coordinates.
view_to_disp_mat = _get_view_to_display_matrix(size)
xy = np.dot(view_to_disp_mat, norm_view_coords.T).T
# Pull the first two columns since they're meaningful for 2d plotting
xy = xy[:, :2]
return xy
def _get_world_to_view_matrix(plotter):
cam = plotter.renderer.camera
scene_size = plotter.window_size
clip_range = cam.GetClippingRange()
aspect_ratio = float(scene_size[0]) / scene_size[1]
vtk_comb_trans_mat = cam.GetCompositeProjectionTransformMatrix(
aspect_ratio, clip_range[0], clip_range[1])
vtk_comb_trans_mat = _mat_to_array(vtk_comb_trans_mat)
return vtk_comb_trans_mat
def _get_view_to_display_matrix(size):
x, y = size
view_to_disp_mat = np.array([[x / 2.0, 0., 0., x / 2.0],
[0., -y / 2.0, 0., y / 2.0],
[0., 0., 1., 0.],
[0., 0., 0., 1.]])
return view_to_disp_mat
|
adykstra/mne-python
|
mne/viz/backends/_pyvista.py
|
Python
|
bsd-3-clause
| 12,256
|
[
"VTK"
] |
62f24652211294a567b2c7a3b679a4899f659c1c6c354dde59b422915b35814c
|
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
def cars_checkpoint():
cars = h2o.upload_file(pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
predictors = ["displacement","power","weight","acceleration","year"]
response_col = "economy"
distribution = "gaussian"
# build first model
from h2o.estimators.gbm import H2OGradientBoostingEstimator
model1 = H2OGradientBoostingEstimator(ntrees=10,max_depth=2, min_rows=10, distribution=distribution)
model1.train(x=predictors,y=response_col,training_frame=cars)
# model1 = h2o.gbm(x=cars[predictors],y=cars[response_col],ntrees=10,max_depth=2, min_rows=10,
# distribution=distribution)
# continue building the model
model2 = H2OGradientBoostingEstimator(ntrees=11,max_depth=3, min_rows=9,r2_stopping=0.8,
distribution=distribution,checkpoint=model1._id)
model2.train(x=predictors,y=response_col,training_frame=cars)
# model2 = h2o.gbm(x=cars[predictors],y=cars[response_col],ntrees=11,max_depth=3, min_rows=9,r2_stopping=0.8,
# distribution=distribution,checkpoint=model1._id)
# erroneous, not MODIFIABLE_BY_CHECKPOINT_FIELDS
# PUBDEV-1833
# learn_rate
try:
model = H2OGradientBoostingEstimator(learn_rate=0.00001,distribution=distribution,
checkpoint=model1._id)
model.train(x=predictors,y=response_col,training_frame=cars)
# model = h2o.gbm(y=cars[response_col], x=cars[predictors],learn_rate=0.00001,distribution=distribution,
# checkpoint=model1._id)
assert False, "Expected model-build to fail because learn_rate not modifiable by checkpoint"
except EnvironmentError:
assert True
# nbins_cats
try:
model = H2OGradientBoostingEstimator(nbins_cats=99,distribution=distribution,
checkpoint=model1._id)
model.train(x=predictors,y=response_col,training_frame=cars)
# model = h2o.gbm(y=cars[response_col], x=cars[predictors],nbins_cats=99,distribution=distribution,
# checkpoint=model1._id)
assert False, "Expected model-build to fail because nbins_cats not modifiable by checkpoint"
except EnvironmentError:
assert True
# balance_classes
try:
model = H2OGradientBoostingEstimator(balance_classes=True,distribution=distribution,
checkpoint=model1._id)
model.train(x=predictors,y=response_col,training_frame=cars)
# model = h2o.gbm(y=cars[response_col], x=cars[predictors],balance_classes=True,distribution=distribution,
# checkpoint=model1._id)
assert False, "Expected model-build to fail because balance_classes not modifiable by checkpoint"
except EnvironmentError:
assert True
# nbins
try:
model = H2OGradientBoostingEstimator(nbins=99,distribution=distribution,
checkpoint=model1._id)
model.train(x=predictors,y=response_col,training_frame=cars)
# model = h2o.gbm(y=cars[response_col], x=cars[predictors],nbins=99,distribution=distribution,
# checkpoint=model1._id)
assert False, "Expected model-build to fail because nbins not modifiable by checkpoint"
except EnvironmentError:
assert True
# nfolds
try:
model = H2OGradientBoostingEstimator(nfolds=3,distribution=distribution,
checkpoint=model1._id)
model.train(x=predictors,y=response_col,training_frame=cars)
# model = h2o.gbm(y=cars[response_col], x=cars[predictors],nfolds=3,distribution=distribution,
# checkpoint=model1._id)
assert False, "Expected model-build to fail because nfolds not modifiable by checkpoint"
except EnvironmentError:
assert True
if __name__ == "__main__":
pyunit_utils.standalone_test(cars_checkpoint)
else:
cars_checkpoint()
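# Sketch (illustrative, not part of the original test): the five try/except
# blocks above share one shape, so they could be driven by a single loop.
# Only the keyword arguments below are taken from the blocks above.
def check_not_modifiable_by_checkpoint(cars, predictors, response_col, model1, distribution):
    from h2o.estimators.gbm import H2OGradientBoostingEstimator
    for kwargs in ({"learn_rate": 0.00001}, {"nbins_cats": 99},
                   {"balance_classes": True}, {"nbins": 99}, {"nfolds": 3}):
        try:
            m = H2OGradientBoostingEstimator(distribution=distribution,
                                             checkpoint=model1._id, **kwargs)
            m.train(x=predictors, y=response_col, training_frame=cars)
            assert False, "Expected failure: %s not modifiable by checkpoint" % list(kwargs)[0]
        except EnvironmentError:
            pass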
|
h2oai/h2o-dev
|
h2o-py/tests/testdir_algos/gbm/pyunit_NOPASS_error_checkpointGBM.py
|
Python
|
apache-2.0
| 4,184
|
[
"Gaussian"
] |
afc82821d7de007143616de0eac42b43f55cd0675947fc69c60f468148ec19d3
|
#!/usr/bin/env python
# import os
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# create pipeline - rectilinear grid
#
rgridReader = vtk.vtkRectilinearGridReader()
rgridReader.SetFileName(VTK_DATA_ROOT + "/Data/RectGrid2.vtk")
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(rgridReader.GetOutputPort())
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(outline.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
rgridReader.Update()
extract1 = vtk.vtkExtractRectilinearGrid()
extract1.SetInputConnection(rgridReader.GetOutputPort())
# extract1.SetVOI(0, 46, 0, 32, 0, 10)
extract1.SetVOI(23, 40, 16, 30, 9, 9)
extract1.SetSampleRate(2, 2, 1)
extract1.IncludeBoundaryOn()
extract1.Update()
surf1 = vtk.vtkDataSetSurfaceFilter()
surf1.SetInputConnection(extract1.GetOutputPort())
tris = vtk.vtkTriangleFilter()
tris.SetInputConnection(surf1.GetOutputPort())
mapper1 = vtk.vtkPolyDataMapper()
mapper1.SetInputConnection(tris.GetOutputPort())
mapper1.SetScalarRange(extract1.GetOutput().GetScalarRange())
actor1 = vtk.vtkActor()
actor1.SetMapper(mapper1)
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# ren1.AddActor(actor)
ren1.AddActor(actor1)
renWin.SetSize(340, 400)
iren.Initialize()
# render the image
#iren.Start()
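# Optional follow-up (sketch, not part of the original test): the extracted
# subgrid could be written to disk for inspection with the legacy writer from
# the same IO module as the reader above.
# writer = vtk.vtkRectilinearGridWriter()
# writer.SetFileName("extracted.vtk")
# writer.SetInputConnection(extract1.GetOutputPort())
# writer.Write()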
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Filters/Extraction/Testing/Python/extractRectGrid.py
|
Python
|
gpl-3.0
| 1,492
|
[
"VTK"
] |
79cbdcb976db10f37292059948d74ad5144f4b8ca0ab934af62f0d599e13b511
|
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
from pathlib import Path
import neurom.geom.transform as gtr
import numpy as np
from neurom import load_neuron
from neurom.features import neuritefunc as _nf
from nose import tools as nt
TEST_UVEC = np.array([0.01856633, 0.37132666, 0.92831665])
TEST_ANGLE = np.pi / 3.
DATA_PATH = Path(__file__).parent.parent.parent.parent / 'test_data'
H5_NRN_PATH = DATA_PATH / 'h5/v1/Neuron.h5'
SWC_NRN_PATH = DATA_PATH / 'swc/Neuron.swc'
def _Rx(angle):
sn = np.sin(angle)
cs = np.cos(angle)
return np.array([[1., 0., 0.],
[0., cs, -sn],
[0., sn, cs]])
def _Ry(angle):
sn = np.sin(angle)
cs = np.cos(angle)
return np.array([[cs, 0., sn],
[0., 1., 0.],
[-sn, 0., cs]])
def _Rz(angle):
sn = np.sin(angle)
cs = np.cos(angle)
return np.array([[cs, -sn, 0.],
[sn, cs, 0.],
[0., 0., 1.]])
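# Quick sanity check (illustrative): a 90-degree rotation about z maps the
# x unit vector onto y, matching ROT_90 defined below:
#
#     >>> np.allclose(_Rz(np.pi / 2.).dot([1., 0., 0.]), [0., 1., 0.])
#     True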
@nt.raises(NotImplementedError)
def test_not_implemented_transform_call_raises():
class Dummy(gtr.Transform3D):
pass
d = Dummy()
d([1, 2, 3])
@nt.raises(NotImplementedError)
def test_translate_bad_type_raises():
gtr.translate("hello", [1, 2, 3])
@nt.raises(NotImplementedError)
def test_rotate_bad_type_raises():
gtr.rotate("hello", [1, 0, 0], math.pi)
def test_translate_point():
t = gtr.Translation([100, -100, 100])
point = [1, 2, 3]
nt.assert_equal(t(point).tolist(), [101, -98, 103])
def test_translate_points():
t = gtr.Translation([100, -100, 100])
points = np.array([[1, 2, 3], [11, 22, 33], [111, 222, 333]])
nt.assert_true(np.all(t(points) == np.array([[101, -98, 103],
[111, -78, 133],
[211, 122, 433]])))
ROT_90 = np.array([[0, -1, 0],
[1, 0, 0],
[0, 0, 1]])
ROT_180 = np.array([[-1, 0, 0],
[0, -1, 0],
[0, 0, 1]])
ROT_270 = np.array([[0, 1, 0],
[-1, 0, 0],
[0, 0, 1]])
def test_rotate_point():
rot = gtr.Rotation(ROT_90)
nt.assert_equal(rot([2, 0, 0]).tolist(), [0, 2, 0])
nt.assert_equal(rot([0, 2, 0]).tolist(), [-2, 0, 0])
nt.assert_equal(rot([0, 0, 2]).tolist(), [0, 0, 2])
rot = gtr.Rotation(ROT_180)
nt.assert_equal(rot([2, 0, 0]).tolist(), [-2, 0, 0])
nt.assert_equal(rot([0, 2, 0]).tolist(), [0, -2, 0])
nt.assert_equal(rot([0, 0, 2]).tolist(), [0, 0, 2])
rot = gtr.Rotation(ROT_270)
nt.assert_equal(rot([2, 0, 0]).tolist(), [0, -2, 0])
nt.assert_equal(rot([0, 2, 0]).tolist(), [2, 0, 0])
nt.assert_equal(rot([0, 0, 2]).tolist(), [0, 0, 2])
def test_rotate_points():
rot = gtr.Rotation(ROT_90)
points = np.array([[2, 0, 0],
[0, 2, 0],
[0, 0, 2],
[3, 0, 3]])
nt.assert_true(np.all(rot(points) == np.array([[0, 2, 0],
[-2, 0, 0],
[0, 0, 2],
[0, 3, 3]])))
rot = gtr.Rotation(ROT_180)
nt.assert_true(np.all(rot(points) == np.array([[-2, 0, 0],
[0, -2, 0],
[0, 0, 2],
[-3, 0, 3]])))
rot = gtr.Rotation(ROT_270)
nt.assert_true(np.all(rot(points) == np.array([[0, -2, 0],
[2, 0, 0],
[0, 0, 2],
[0, -3, 3]])))
def test_pivot_rotate_point():
point = [1, 2, 3]
new_orig = np.array([10., 45., 50.])
t = gtr.Translation(new_orig)
t_inv = gtr.Translation(new_orig * -1)
R = gtr._rodrigues_to_dcm(TEST_UVEC, np.pi)
# change origin, rotate 180
p1 = gtr.PivotRotation(R, new_orig)(point)
# do the steps manually
p2 = t_inv(point)
p2 = gtr.Rotation(R)(p2)
p2 = t(p2)
nt.assert_equal(p1.tolist(), p2.tolist())
def test_pivot_rotate_points():
points = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[10, 11, 12]])
new_orig = np.array([10., 45., 50.])
t = gtr.Translation(new_orig)
t_inv = gtr.Translation(new_orig * -1)
R = gtr._rodrigues_to_dcm(TEST_UVEC, np.pi)
# change origin, rotate 180
p1 = gtr.PivotRotation(R, new_orig)(points)
# do the steps manually
p2 = t_inv(points)
p2 = gtr.Rotation(R)(p2)
p2 = t(p2)
nt.assert_true(np.all(p1 == p2))
def _check_fst_nrn_translate(nrn_a, nrn_b, t):
# soma points
nt.assert_true(np.allclose((nrn_b.soma.points[:, 0:3] - nrn_a.soma.points[:, 0:3]), t))
_check_fst_neurite_translate(nrn_a.neurites, nrn_b.neurites, t)
def _check_fst_neurite_translate(nrts_a, nrts_b, t):
# neurite sections
for sa, sb in zip(_nf.iter_sections(nrts_a),
_nf.iter_sections(nrts_b)):
nt.assert_true(np.allclose((sb.points[:, 0:3] - sa.points[:, 0:3]), t))
def test_translate_fst_neuron_swc():
t = np.array([100., 100., 100.])
nrn = load_neuron(SWC_NRN_PATH)
tnrn = gtr.translate(nrn, t)
_check_fst_nrn_translate(nrn, tnrn, t)
def test_translate_fst_neurite_swc():
t = np.array([100., 100., 100.])
nrn = load_neuron(SWC_NRN_PATH)
nrt_a = nrn.neurites[0]
nrt_b = gtr.translate(nrt_a, t)
_check_fst_neurite_translate(nrt_a, nrt_b, t)
def test_transform_translate_neuron_swc():
t = np.array([100., 100., 100.])
nrn = load_neuron(SWC_NRN_PATH)
tnrn = nrn.transform(gtr.Translation(t))
_check_fst_nrn_translate(nrn, tnrn, t)
def test_translate_fst_neuron_h5():
t = np.array([100., 100., 100.])
nrn = load_neuron(H5_NRN_PATH)
tnrn = gtr.translate(nrn, t)
_check_fst_nrn_translate(nrn, tnrn, t)
def test_translate_fst_neurite_h5():
t = np.array([100., 100., 100.])
nrn = load_neuron(H5_NRN_PATH)
nrt_a = nrn.neurites[0]
nrt_b = gtr.translate(nrt_a, t)
_check_fst_neurite_translate(nrt_a, nrt_b, t)
def test_transform_translate_neuron_h5():
t = np.array([100., 100., 100.])
nrn = load_neuron(H5_NRN_PATH)
tnrn = nrn.transform(gtr.Translation(t))
_check_fst_nrn_translate(nrn, tnrn, t)
def _apply_rot(points, rot_mat):
return np.dot(rot_mat, np.array(points).T).T
def _check_fst_nrn_rotate(nrn_a, nrn_b, rot_mat):
# soma points
nt.assert_true(np.allclose(_apply_rot(nrn_a.soma.points[:, 0:3], rot_mat),
nrn_b.soma.points[:, 0:3]))
# neurite sections
_check_fst_neurite_rotate(nrn_a.neurites, nrn_b.neurites, rot_mat)
def _check_fst_neurite_rotate(nrt_a, nrt_b, rot_mat):
for sa, sb in zip(_nf.iter_sections(nrt_a),
_nf.iter_sections(nrt_b)):
nt.assert_true(np.allclose(sb.points[:, 0:3],
_apply_rot(sa.points[:, 0:3], rot_mat)))
def test_rotate_neuron_swc():
nrn_a = load_neuron(SWC_NRN_PATH)
nrn_b = gtr.rotate(nrn_a, [0, 0, 1], math.pi/2.0)
rot = gtr._rodrigues_to_dcm([0, 0, 1], math.pi/2.0)
_check_fst_nrn_rotate(nrn_a, nrn_b, rot)
def test_rotate_neurite_swc():
nrn_a = load_neuron(SWC_NRN_PATH)
nrt_a = nrn_a.neurites[0]
nrt_b = gtr.rotate(nrt_a, [0, 0, 1], math.pi/2.0)
rot = gtr._rodrigues_to_dcm([0, 0, 1], math.pi/2.0)
_check_fst_neurite_rotate(nrt_a, nrt_b, rot)
def test_transform_rotate_neuron_swc():
rot = gtr.Rotation(ROT_90)
nrn_a = load_neuron(SWC_NRN_PATH)
nrn_b = nrn_a.transform(rot)
_check_fst_nrn_rotate(nrn_a, nrn_b, ROT_90)
def test_rotate_neuron_h5():
nrn_a = load_neuron(H5_NRN_PATH)
nrn_b = gtr.rotate(nrn_a, [0, 0, 1], math.pi/2.0)
rot = gtr._rodrigues_to_dcm([0, 0, 1], math.pi/2.0)
_check_fst_nrn_rotate(nrn_a, nrn_b, rot)
def test_rotate_neurite_h5():
nrn_a = load_neuron(H5_NRN_PATH)
nrt_a = nrn_a.neurites[0]
nrt_b = gtr.rotate(nrt_a, [0, 0, 1], math.pi/2.0)
rot = gtr._rodrigues_to_dcm([0, 0, 1], math.pi/2.0)
_check_fst_neurite_rotate(nrt_a, nrt_b, rot)
def test_transform_rotate_neuron_h5():
rot = gtr.Rotation(ROT_90)
nrn_a = load_neuron(H5_NRN_PATH)
nrn_b = nrn_a.transform(rot)
_check_fst_nrn_rotate(nrn_a, nrn_b, ROT_90)
def test_rodrigues_to_dcm():
RES = np.array([[0.50017235, -0.80049871, 0.33019604],
[0.80739289, 0.56894174, 0.15627544],
[-0.3129606, 0.18843328, 0.9308859]])
R = gtr._rodrigues_to_dcm(TEST_UVEC, TEST_ANGLE)
# assess rotation matrix properties:
    # det(R) = +1 for a proper rotation
nt.assert_almost_equal(np.linalg.det(R), 1.)
# R.T = R^-1
nt.assert_true(np.allclose(np.linalg.inv(R), R.transpose()))
# check against calculated matrix
nt.assert_true(np.allclose(R, RES))
# check if opposite sign generates inverse
Rinv = gtr._rodrigues_to_dcm(TEST_UVEC, -TEST_ANGLE)
nt.assert_true(np.allclose(np.dot(Rinv, R), np.identity(3)))
# check basic rotations with a range of angles
for angle in np.linspace(0., 2. * np.pi, 10):
Rx = gtr._rodrigues_to_dcm(np.array([1., 0., 0.]), angle)
Ry = gtr._rodrigues_to_dcm(np.array([0., 1., 0.]), angle)
Rz = gtr._rodrigues_to_dcm(np.array([0., 0., 1.]), angle)
nt.assert_true(np.allclose(Rx, _Rx(angle)))
nt.assert_true(np.allclose(Ry, _Ry(angle)))
nt.assert_true(np.allclose(Rz, _Rz(angle)))
|
wizmer/NeuroM
|
neurom/geom/tests/test_transform.py
|
Python
|
bsd-3-clause
| 11,389
|
[
"NEURON"
] |
a4d834abda583de8a9a023981aaa1ac9443812dc0bb335d8dc8d1b885785b362
|
# Copyright (C) 2012,2013,2015
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
*******************************************
**espressopp.interaction.CoulombTruncated**
*******************************************
.. math::
U = k\frac{q_iq_j}{d_{ij}}
where `k` is the user-supplied prefactor, `q_i` is the charge of particle `i`, and `d_{ij}` is the interparticle distance.
In this interaction potential, a different charge can be associated with each particle. For a truncated Coulomb interaction potential where only one `q_iq_j` value is specified for all interactions, see CoulombTruncatedUniqueCharge.
.. function:: espressopp.interaction.CoulombTruncated(prefactor, cutoff)
    :param prefactor: (default: 1.0)
    :param cutoff: (default: infinity)
    :type prefactor: real
    :type cutoff: real
.. function:: espressopp.interaction.VerletListCoulombTruncated(vl)
    :param vl:
    :type vl:
.. function:: espressopp.interaction.VerletListCoulombTruncated.getPotential(type1, type2)
    :param type1:
    :param type2:
    :type type1:
    :type type2:
.. function:: espressopp.interaction.VerletListCoulombTruncated.setPotential(type1, type2, potential)
    :param type1:
    :param type2:
    :param potential:
    :type type1:
    :type type2:
    :type potential:
.. function:: espressopp.interaction.FixedPairListTypesCoulombTruncated(system, vl)
    :param system:
    :param vl:
    :type system:
    :type vl:
.. function:: espressopp.interaction.FixedPairListTypesCoulombTruncated.setPotential(type1, type2, potential)
    :param type1:
    :param type2:
    :param potential:
    :type type1:
    :type type2:
    :type potential:
"""
from espressopp import pmi, infinity
from espressopp.esutil import *
from espressopp.interaction.Potential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_CoulombTruncated, \
interaction_VerletListCoulombTruncated, \
interaction_FixedPairListTypesCoulombTruncated
class CoulombTruncatedLocal(PotentialLocal, interaction_CoulombTruncated):
'The (local) CoulombTruncated potential.'
def __init__(self, prefactor=1.0, cutoff=infinity):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_CoulombTruncated, prefactor, cutoff)
class VerletListCoulombTruncatedLocal(InteractionLocal, interaction_VerletListCoulombTruncated):
'The (local) CoulombTruncated interaction using Verlet lists.'
def __init__(self, vl):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListCoulombTruncated, vl)
def setPotential(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, potential)
def getPotential(self, type1, type2):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getPotential(self, type1, type2)
class FixedPairListTypesCoulombTruncatedLocal(InteractionLocal, interaction_FixedPairListTypesCoulombTruncated):
'The (local) CoulombTruncated interaction using FixedPair lists with types.'
def __init__(self, system, vl):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_FixedPairListTypesCoulombTruncated, system, vl)
def setPotential(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, potential)
if pmi.isController:
class CoulombTruncated(Potential):
'The CoulombTruncated potential.'
pmiproxydefs = dict(
cls = 'espressopp.interaction.CoulombTruncatedLocal',
pmiproperty = ['prefactor', 'alpha']
)
class VerletListCoulombTruncated(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.VerletListCoulombTruncatedLocal',
pmicall = ['setPotential','getPotential']
)
class FixedPairListTypesCoulombTruncated(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.FixedPairListTypesCoulombTruncatedLocal',
pmicall = ['setPotential']
)
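# Usage sketch (illustrative; assumes an existing espressopp `system` and
# Verlet list `vl`, and the numeric values are placeholders):
#
#     pot = espressopp.interaction.CoulombTruncated(prefactor=1.0, cutoff=2.5)
#     coulomb = espressopp.interaction.VerletListCoulombTruncated(vl)
#     coulomb.setPotential(type1=0, type2=0, potential=pot)
#     system.addInteraction(coulomb)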
|
capoe/espressopp.soap
|
src/interaction/CoulombTruncated.py
|
Python
|
gpl-3.0
| 5,450
|
[
"ESPResSo"
] |
db724a7a4f2932499156f3b31ac1c5b89e337ceab7abee5b67ebd63f94e8bd49
|
# -*- coding: utf-8 -*-
""" *==LICENSE==*
CyanWorlds.com Engine - MMOG client, server and tools
Copyright (C) 2011 Cyan Worlds, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Additional permissions under GNU GPL version 3 section 7
If you modify this Program, or any covered work, by linking or
combining it with any of RAD Game Tools Bink SDK, Autodesk 3ds Max SDK,
NVIDIA PhysX SDK, Microsoft DirectX SDK, OpenSSL library, Independent
JPEG Group JPEG library, Microsoft Windows Media SDK, or Apple QuickTime SDK
(or a modified version of those libraries),
containing parts covered by the terms of the Bink SDK EULA, 3ds Max EULA,
PhysX SDK EULA, DirectX SDK EULA, OpenSSL and SSLeay licenses, IJG
JPEG Library README, Windows Media SDK EULA, or QuickTime SDK EULA, the
licensors of this Program grant you additional
permission to convey the resulting work. Corresponding Source for a
non-source form of such a combination shall include the source code for
the parts of OpenSSL and IJG JPEG Library used as well as that of the covered
work.
You can contact Cyan Worlds, Inc. by email legal@cyan.com
or by snail mail at:
Cyan Worlds, Inc.
14617 N Newport Hwy
Mead, WA 99021
*==LICENSE==* """
"""
Module: Personal.py
Age: Personal
Date: October 2002
event manager hooks for the personal age
"""
from Plasma import *
from PlasmaTypes import *
from PlasmaKITypes import *
import string
import time
kEmptyGuid = '0000000000000000'
kIntroPlayedChronicle = "IntroPlayed"
# Hood Clothing
kHoodClothing = {"56d1173b-60b0-4b02-8e2f-b7730a61b4ec" : "GuildOfSleepers",
"e84fa8e5-86f0-4141-8f91-b01d4423d0c3" : "NULP"}
class Personal(ptResponder):
def __init__(self):
ptResponder.__init__(self)
self.id = 5022
self.version = 5
PtDebugPrint("Personal: __init__ version %d.%d" % (self.version,1),level=kWarningLevel)
def gotPublicAgeList(self,ages):
# got a list of cities, now we save our var!
# we're going to pick the one with the highest guid (nexus picks the lowest guid)
highestGuid = 0
for age in ages:
guid = age[0].getAgeInstanceGuid()
if guid > highestGuid:
highestGuid = guid
PtDebugPrint("Personal.gotPublicAgeList(): Using city GUID "+str(highestGuid))
vault = ptVault()
l = ptAgeInfoStruct()
l.setAgeFilename('city')
myCity = vault.getOwnedAgeLink(l)
if myCity:
cityInfo = myCity.getAgeInfo()
if cityInfo:
cityInfo.setAgeInstanceGuid(highestGuid)
cityInfo.save()
def OnFirstUpdate(self):
# already played intro sometime in the past... just let 'em play
# enable twice because if we came from the ACA (closet->ACA->personal) it was disabled twice
PtSendKIMessage(kEnableKIandBB,0)
PtSendKIMessage(kEnableKIandBB,0)
# enable yeesha book in case we came from the bahro cave
PtSendKIMessage(kEnableYeeshaBook,0)
# turn off sound log tracks
import xSndLogTracks
xSndLogTracks.UnsetLogMode()
# make sure we have at least the micro-ki (and therefore a Relto book)
# this does not downgrade us if we have a normal KI :)
PtSendKIMessageInt(kUpgradeKILevel, kMicroKI)
vault = ptVault()
l = ptAgeInfoStruct()
l.setAgeFilename('city')
myCity = vault.getOwnedAgeLink(l)
if myCity:
cityInfo = myCity.getAgeInfo()
if cityInfo:
if cityInfo.getAgeInstanceGuid()==kEmptyGuid:
# we don't have it yet, so make it! (the callback will make it for us)
PtGetPublicAgeList('city',self)
else:
PtDebugPrint("hmm. city link has no age info node")
else:
PtDebugPrint("hmm. player has no city link")
#~ # record our visit in player's chronicle
#~ kModuleName = "Personal"
#~ kChronicleVarName = "LinksIntoPersonalAge"
#~ kChronicleVarType = 0
#~ vault = ptVault()
#~ if type(vault) != type(None):
#~ entry = vault.findChronicleEntry(kChronicleVarName)
#~ if type(entry) == type(None):
#~ # not found... add current level chronicle
#~ vault.addChronicleEntry(kChronicleVarName,kChronicleVarType,"%d" %(1))
#~ PtDebugPrint("%s:\tentered new chronicle counter %s" % (kModuleName,kChronicleVarName))
#~ else:
#~ import string
#~ count = string.atoi(entry.chronicleGetValue())
#~ count = count + 1
#~ entry.chronicleSetValue("%d" % (count))
#~ entry.save()
#~ PtDebugPrint("%s:\tyour current count for %s is %s" % (kModuleName,kChronicleVarName,entry.chronicleGetValue()))
#~ else:
#~ PtDebugPrint("%s:\tERROR trying to access vault -- can't update %s variable in chronicle." % (kModuleName,kChronicleVarName))
pass
def OnServerInitComplete(self):
ageSDL = PtGetAgeSDL()
PtDebugPrint("Personal.OnServerInitComplete(): Grabbing first week clothing item boolean")
try:
firstWeekClothing = ageSDL["FirstWeekClothing"][0]
except:
PtDebugPrint("Unable to get the first week clothing item bool, not going to add it just to be safe")
firstWeekClothing = 0
avatar = PtGetLocalAvatar()
currentgender = avatar.avatar.getAvatarClothingGroup()
if firstWeekClothing:
if currentgender == kFemaleClothingGroup:
clothingName = "FReward_Beta"
else:
clothingName = "MReward_Beta"
clothingList = avatar.avatar.getWardrobeClothingList()
if clothingName not in clothingList:
PtDebugPrint("Adding "+clothingName+" clothing item to your closet! Aren't you lucky?")
avatar.avatar.addWardrobeClothingItem(clothingName,ptColor().white(),ptColor().white())
else:
PtDebugPrint("You already have " + clothingName + " so I'm not going to add it again.")
else:
PtDebugPrint("I guess you're too late, you don't get the first week clothing item")
PtDebugPrint("Personal.OnServerInitComplete(): Checking to see if we need to add reward clothing to your closet")
try:
rewardList = ageSDL["RewardClothing"][0]
except:
PtDebugPrint("Unable to grab the reward clothing list from SDL, not going to add anything")
rewardList = ""
PtDebugPrint("Personal.OnServerInitComplete(): Checking to see if we need to add global reward clothing to your closet")
try:
globalRewardList = ageSDL["GlobalRewardClothing"][0]
except:
PtDebugPrint("Unable to grab the global reward clothing list from SDL, not going to add anything")
globalRewardList = ""
nameSuffixList = []
if rewardList != "":
nameSuffixList += rewardList.split(";") # get all the suffixes
if globalRewardList != "":
nameSuffixList += globalRewardList.split(";") # add the global items
for suffix in nameSuffixList:
suffix = suffix.strip() # get rid of all the whitespace
if currentgender == kFemaleClothingGroup:
genderPrefix = "FReward_"
else:
genderPrefix = "MReward_"
clothingName = genderPrefix + suffix
clothingList = avatar.avatar.getWardrobeClothingList()
if clothingName not in clothingList:
PtDebugPrint("Adding "+clothingName+" to your closet")
avatar.avatar.addWardrobeClothingItem(clothingName,ptColor().white(),ptColor().white())
else:
PtDebugPrint("You already have " + clothingName + " so I'm not going to add it again.")
if rewardList != "":
ageSDL["RewardClothing"] = ("",)
else:
PtDebugPrint("Reward clothing list empty, not adding any clothing")
#save the avatar for startup
avatar.avatar.saveClothingToFile(str(PtGetLocalPlayer().getPlayerID()) + ".clo")
#Check for Hood-specific clothing
hoodguid = ptVault().getLinkToMyNeighborhood().getAgeInfo().getAgeInstanceGuid()
if currentgender == kFemaleClothingGroup:
genderPrefix = "FHood_"
else:
genderPrefix = "MHood_"
for shirtguid, shirtname in kHoodClothing.iteritems():
if hoodguid == shirtguid:
self.AddHoodClothing(genderPrefix + shirtname)
break
#####REMOVING CHRONICLES#####
remove = ["KIVirus", "KIVirusLevel"]
vault = ptVault()
folder = vault.getChronicleFolder()
folderNodeChildList = folder.getChildNodeRefList()
for folderChild in folderNodeChildList:
childNode = folderChild.getChild()
chronicle = childNode.upcastToChronicleNode()
name = chronicle.getName()
print name
if (name in remove):
folder.removeNode(childNode)
print "--->removed"
def AddHoodClothing(self, clothingName):
avatar = PtGetLocalAvatar()
clothingList = avatar.avatar.getWardrobeClothingList()
if clothingName not in clothingList:
PtDebugPrint("Adding "+clothingName+" to your closet")
avatar.avatar.addWardrobeClothingItem(clothingName,ptColor().white(),ptColor().white())
else:
PtDebugPrint("You already have " + clothingName + " so I'm not going to add it again.")
def Load(self):
pass
def OnNotify(self,state,id,events):
pass
|
TOC-Shard/moul-scripts
|
Python/Personal.py
|
Python
|
gpl-3.0
| 10,450
|
[
"VisIt"
] |
5eb61f2732e0bfbbbd09001a615c84740d42e68bba05375d8e719c0c707036b8
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
""" Global sisl fixtures """
import contextlib
import os
import numpy as np
from pathlib import Path
import pytest
from sisl import Atom, Geometry, SuperCell, Hamiltonian, _environ
# Here we create the necessary methods and fixtures to enabled/disable
# tests depending on whether a sisl-files directory is present.
# Modify items based on whether the env is correct or not
def pytest_collection_modifyitems(config, items):
sisl_files_tests = _environ.get_environ_variable("SISL_FILES_TESTS")
if sisl_files_tests.is_dir():
if (sisl_files_tests / 'sisl').is_dir():
return
print(f'pytest-sisl: Could not locate sisl directory in: {sisl_files_tests}')
return
skip_sisl_files = pytest.mark.skip(reason="requires env(SISL_FILES_TESTS) pointing to clone of: https://github.com/zerothi/sisl-files")
for item in items:
# Only skip those that have the sisl_files fixture
# GLOBAL skipping of ALL tests that don't have this fixture
if 'sisl_files' in item.fixturenames:
item.add_marker(skip_sisl_files)
@pytest.fixture(scope='function')
def sisl_tmp(request, tmp_path_factory):
""" sisl specific temporary file and directory creator.
sisl_tmp(file, dir_name='sisl')
sisl_tmp.file(file, dir_name='sisl')
sisl_tmp.dir('sisl')
The scope of the `sisl_tmp` fixture is at a function level to
clean up after each function.
"""
class FileFactory:
def __init__(self):
self.base = tmp_path_factory.getbasetemp()
self.dirs = [self.base]
self.files = []
def dir(self, name='sisl'):
# Make name a path
D = Path(name.replace(os.path.sep, '-'))
if not (self.base / D).is_dir():
# tmp_path_factory.mktemp returns pathlib.Path
self.dirs.append(tmp_path_factory.mktemp(str(D), numbered=False))
return self.dirs[-1]
def file(self, name, dir_name='sisl'):
# self.base *is* a pathlib
D = self.base / dir_name.replace(os.path.sep, '-')
if D in self.dirs:
i = self.dirs.index(D)
else:
self.dir(dir_name)
i = -1
self.files.append(self.dirs[i] / name)
return str(self.files[-1])
def getbase(self):
return self.dirs[-1]
def __call__(self, name, dir_name='sisl'):
""" Shorthand for self.file """
return self.file(name, dir_name)
def teardown(self):
while len(self.files) > 0:
# Do each removal separately
f = self.files.pop()
                if f.is_file():
                    # f is a pathlib.Path, which has no close(); the original
                    # close() call always raised and was swallowed, so only
                    # unlink() is needed here
                    try:
                        f.unlink()
                    except:
                        pass
while len(self.dirs) > 0:
# Do each removal separately (from back of directory)
d = self.dirs.pop()
if d.is_dir():
try:
d.rmdir()
except:
pass
ff = FileFactory()
request.addfinalizer(ff.teardown)
return ff
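# Usage sketch (illustrative, not collected from conftest): a test requests
# the fixture by name and calls it to obtain a writable file path.
#
#     def test_writes_a_file(sisl_tmp):
#         fname = sisl_tmp('output.txt')  # file under the 'sisl' tmp dir
#         with open(fname, 'w') as fh:
#             fh.write('hello')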
@pytest.fixture(scope='session')
def sisl_files():
""" Environment catcher for the large files hosted in a different repository.
If SISL_FILES_TESTS has been defined in the environment variable the directory
will be used for the tests with this as a fixture.
If the environment variable is empty and a test has this fixture, it will
be skipped.
"""
sisl_files_tests = _environ.get_environ_variable("SISL_FILES_TESTS")
if not sisl_files_tests.is_dir():
def _path(*files):
pytest.skip(f"Environment SISL_FILES_TESTS not pointing to a valid directory.")
return _path
def _path(*files):
p = sisl_files_tests.joinpath(*files)
if p.exists():
return p
# I expect this test to fail due to the wrong environment.
        # But it isn't an actual failure since the test hasn't run...
pytest.xfail(f"Environment SISL_FILES_TESTS may point to a wrong path(?); file {p} not found")
return _path
@pytest.fixture(scope='session')
def sisl_system():
""" A preset list of geometries/Hamiltonians. """
class System:
pass
d = System()
alat = 1.42
sq3h = 3.**.5 * 0.5
C = Atom(Z=6, R=1.42)
sc = SuperCell(np.array([[1.5, sq3h, 0.],
[1.5, -sq3h, 0.],
[0., 0., 10.]], np.float64) * alat,
nsc=[3, 3, 1])
d.g = Geometry(np.array([[0., 0., 0.],
[1., 0., 0.]], np.float64) * alat,
atoms=C, sc=sc)
d.R = np.array([0.1, 1.5])
d.t = np.array([0., 2.7])
d.tS = np.array([(0., 1.0),
(2.7, 0.)])
d.C = Atom(Z=6, R=max(d.R))
d.sc = SuperCell(np.array([[1.5, sq3h, 0.],
[1.5, -sq3h, 0.],
[0., 0., 10.]], np.float64) * alat,
nsc=[3, 3, 1])
d.gtb = Geometry(np.array([[0., 0., 0.],
[1., 0., 0.]], np.float64) * alat,
atoms=C, sc=sc)
d.ham = Hamiltonian(d.gtb)
d.ham.construct([(0.1, 1.5), (0.1, 2.7)])
return d
# We ignore everything in sisl.viz.plotly if plotly cannot be imported.
# collect_ignore seems not to fully work (this should be reported upstream);
# pytest_ignore_collect, however, is stable and preferable here.
collect_ignore = ["setup.py"]
collect_ignore_glob = []
# skip paths
_skip_paths = []
try:
import plotly
except ImportError:
_skip_paths.append(os.path.join("sisl", "viz", "plotly"))
def pytest_ignore_collect(path, config):
# ensure we only compare against final *sisl* stuff
global _skip_paths
parts = list(Path(path).parts)
parts.reverse()
sisl_parts = parts[:parts.index("sisl")]
sisl_parts.reverse()
sisl_path = str(Path("sisl").joinpath(*sisl_parts))
for skip_path in _skip_paths:
if skip_path in sisl_path:
return True
return False
def pytest_configure(config):
pytest.sisl_travis_skip = pytest.mark.skipif(
os.environ.get("SISL_TRAVIS_CI", "false").lower() == "true",
reason="running on TRAVIS"
)
# Locally manage pytest.ini input
for mark in ['io', 'generic', 'bloch', 'hamiltonian', 'geometry', 'geom', 'shape',
'state', 'electron', 'phonon', 'utils', 'unit', 'distribution',
'spin', 'self_energy', 'help', 'messages', 'namedindex', 'sparse',
'supercell', 'sc', 'quaternion', 'sparse_geometry', 'sparse_orbital',
'ranges', 'physics',
'orbital', 'oplist', 'grid', 'atoms', 'atom', 'sgrid', 'sdata', 'sgeom',
'version', 'bz', 'brillouinzone', 'inv', 'eig', 'linalg',
'density_matrix', 'dynamicalmatrix', 'energydensity_matrix',
'siesta', 'tbtrans', 'vasp', 'w90', 'wannier90', 'gulp', 'fdf',
"category", "geom_category", "plot",
'slow', 'selector', 'overlap', 'mixing',
'viz', 'plotly', 'blender']:
config.addinivalue_line(
"markers", f"{mark}: mark test to run only on named environment"
)
|
zerothi/sisl
|
sisl/conftest.py
|
Python
|
mpl-2.0
| 7,718
|
[
"GULP",
"SIESTA",
"VASP",
"Wannier90"
] |
67f51956f10c606d8407b1d05f097785ef6669516ac0fa1a2b4b7942145e21de
|
""" Pyflation - Cosmological simulations in Python
Pyflation is a python package to simulate cosmological perturbations in the early universe.
Using the Klein-Gordon equations for both first and second order perturbations,
the evolution and behaviour of these perturbations can be studied.
The main entry point to this package is the cosmomodels module, which contains the main
simulation classes.
Configuration of the simulation runs can be changed in the configuration.py and run_config.py files.
For more information please visit http://pyflation.ianhuston.net.
"""
#Author: Ian Huston
#For license and copyright information see LICENSE.txt which was distributed with this file.
__version__ = "0.2.3"
|
ihuston/pyflation
|
pyflation/__init__.py
|
Python
|
bsd-3-clause
| 712
|
[
"VisIt"
] |
199b097956c5a2bf453c2ebf3e2d486d7f530916ae7bc75f851efc1830e4d216
|
""" SQLAlchemyDB:
This module provides the SQLAlchemyDB base class for standard DB interactions.
It uses sqlalchemy.
"""
__RCSID__ = "$Id$"
import datetime
from sqlalchemy import create_engine, desc, exc
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.query import Query
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
from DIRAC.ConfigurationSystem.Client.Utilities import getDBParameters
class SQLAlchemyDB(object):
"""
Base class that defines some of the basic DB interactions.
"""
def __init__(self):
"""c'tor
:param self: self reference
"""
self.log = gLogger.getSubLogger(self.__class__.__name__)
self.extensions = gConfig.getValue('DIRAC/Extensions', [])
self.tablesList = []
def _initializeConnection(self, dbPath):
"""
Collect from the CS all the info needed to connect to the DB.
"""
result = getDBParameters(dbPath)
if not result['OK']:
raise Exception('Cannot get database parameters: %s' % result['Message'])
dbParameters = result['Value']
self.log.debug("db parameters: %s" % dbParameters)
self.host = dbParameters['Host']
self.port = dbParameters['Port']
self.user = dbParameters['User']
self.password = dbParameters['Password']
self.dbName = dbParameters['DBName']
self.engine = create_engine('mysql://%s:%s@%s:%s/%s' % (self.user,
self.password,
self.host,
self.port,
self.dbName),
pool_recycle=3600,
echo_pool=True,
echo=self.log.getLevel() == 'DEBUG')
self.sessionMaker_o = sessionmaker(bind=self.engine)
self.inspector = Inspector.from_engine(self.engine)
def _createTablesIfNotThere(self, tablesList):
"""
Adds each table in tablesList to the DB if not already present
"""
tablesInDB = self.inspector.get_table_names()
for table in tablesList:
if table not in tablesInDB:
found = False
# is it in the extension? (fully or extended)
for ext in self.extensions:
try:
getattr(
__import__(
ext + self.__class__.__module__,
globals(),
locals(),
[table]),
table).__table__.create(
self.engine) # pylint: disable=no-member
found = True
break
except (ImportError, AttributeError):
continue
# If not found in extensions, import it from DIRAC base.
if not found:
getattr(
__import__(
self.__class__.__module__,
globals(),
locals(),
[table]),
table).__table__.create(
self.engine) # pylint: disable=no-member
else:
gLogger.debug("Table %s already exists" % table)
def insert(self, table, params):
"""
Inserts params in the DB.
:param table: table where to insert
:type table: str
:param params: Dictionary to fill a single line
:type params: dict
:return: S_OK() || S_ERROR()
"""
# expire_on_commit is set to False so that we can still use the object after we close the session
session = self.sessionMaker_o(expire_on_commit=False) # FIXME: should we use this flag elsewhere?
found = False
for ext in self.extensions:
try:
tableRow_o = getattr(__import__(ext + self.__class__.__module__, globals(), locals(), [table]), table)()
found = True
break
except (ImportError, AttributeError):
continue
# If not found in extensions, import it from DIRAC base (this same module).
if not found:
tableRow_o = getattr(__import__(self.__class__.__module__, globals(), locals(), [table]), table)()
tableRow_o.fromDict(params)
try:
session.add(tableRow_o)
session.commit()
return S_OK()
except exc.IntegrityError as err:
self.log.warn("insert: trying to insert a duplicate key? %s" % err)
session.rollback()
except exc.SQLAlchemyError as e:
session.rollback()
self.log.exception("insert: unexpected exception", lException=e)
return S_ERROR("insert: unexpected exception %s" % e)
finally:
session.close()
def select(self, table, params):
"""
Uses params to build conditional SQL statement ( WHERE ... ).
:Parameters:
**params** - `dict`
arguments for the mysql query ( must match table columns ! ).
:return: S_OK() || S_ERROR()
"""
session = self.sessionMaker_o()
# finding the table
found = False
for ext in self.extensions:
try:
table_c = getattr(__import__(ext + self.__class__.__module__, globals(), locals(), [table]), table)
found = True
break
except (ImportError, AttributeError):
continue
# If not found in extensions, import it from DIRAC base (this same module).
if not found:
table_c = getattr(__import__(self.__class__.__module__, globals(), locals(), [table]), table)
# handling query conditions found in 'Meta'
columnNames = [column.lower() for column in params.get('Meta', {}).get('columns', [])]
older = params.get('Meta', {}).get('older', None)
newer = params.get('Meta', {}).get('newer', None)
order = params.get('Meta', {}).get('order', None)
limit = params.get('Meta', {}).get('limit', None)
params.pop('Meta', None)
try:
# setting up the select query
if not columnNames: # query on the whole table
wholeTable = True
columns = table_c.__table__.columns # retrieve the column names
columnNames = [str(column).split('.')[1] for column in columns]
select = Query(table_c, session=session)
else: # query only the selected columns
wholeTable = False
columns = [getattr(table_c, column) for column in columnNames]
select = Query(columns, session=session)
# query conditions
for columnName, columnValue in params.iteritems():
if not columnValue:
continue
column_a = getattr(table_c, columnName.lower())
if isinstance(columnValue, (list, tuple)):
select = select.filter(column_a.in_(list(columnValue)))
elif isinstance(columnValue, (basestring, datetime.datetime, bool)):
select = select.filter(column_a == columnValue)
else:
self.log.error("type(columnValue) == %s" % type(columnValue))
if older:
column_a = getattr(table_c, older[0].lower())
select = select.filter(column_a < older[1])
if newer:
column_a = getattr(table_c, newer[0].lower())
select = select.filter(column_a > newer[1])
if order:
order = [order] if isinstance(order, basestring) else list(order)
column_a = getattr(table_c, order[0].lower())
if len(order) == 2 and order[1].lower() == 'desc':
select = select.order_by(desc(column_a))
else:
select = select.order_by(column_a)
if limit:
select = select.limit(int(limit))
# querying
selectionRes = select.all()
# handling the results
if wholeTable:
selectionResToList = [res.toList() for res in selectionRes]
else:
selectionResToList = [[getattr(res, col) for col in columnNames] for res in selectionRes]
finalResult = S_OK(selectionResToList)
finalResult['Columns'] = columnNames
return finalResult
except exc.SQLAlchemyError as e:
session.rollback()
self.log.exception("select: unexpected exception", lException=e)
return S_ERROR("select: unexpected exception %s" % e)
finally:
session.close()
def delete(self, table, params):
"""
:param table: table from where to delete
:type table: str
:param params: dictionary of which line(s) to delete
:type params: dict
:return: S_OK() || S_ERROR()
"""
session = self.sessionMaker_o()
found = False
for ext in self.extensions:
try:
table_c = getattr(__import__(ext + self.__class__.__module__, globals(), locals(), [table]), table)
found = True
break
except (ImportError, AttributeError):
continue
# If not found in extensions, import it from DIRAC base (this same module).
if not found:
table_c = getattr(__import__(self.__class__.__module__, globals(), locals(), [table]), table)
# handling query conditions found in 'Meta'
older = params.get('Meta', {}).get('older', None)
newer = params.get('Meta', {}).get('newer', None)
order = params.get('Meta', {}).get('order', None)
limit = params.get('Meta', {}).get('limit', None)
params.pop('Meta', None)
try:
deleteQuery = Query(table_c, session=session)
for columnName, columnValue in params.iteritems():
if not columnValue:
continue
column_a = getattr(table_c, columnName.lower())
if isinstance(columnValue, (list, tuple)):
deleteQuery = deleteQuery.filter(column_a.in_(list(columnValue)))
elif isinstance(columnValue, (basestring, datetime.datetime, bool)):
deleteQuery = deleteQuery.filter(column_a == columnValue)
else:
self.log.error("type(columnValue) == %s" % type(columnValue))
if older:
column_a = getattr(table_c, older[0].lower())
deleteQuery = deleteQuery.filter(column_a < older[1])
if newer:
column_a = getattr(table_c, newer[0].lower())
deleteQuery = deleteQuery.filter(column_a > newer[1])
if order:
order = [order] if isinstance(order, basestring) else list(order)
column_a = getattr(table_c, order[0].lower())
if len(order) == 2 and order[1].lower() == 'desc':
deleteQuery = deleteQuery.order_by(desc(column_a))
else:
deleteQuery = deleteQuery.order_by(column_a)
if limit:
deleteQuery = deleteQuery.limit(int(limit))
res = deleteQuery.delete(synchronize_session=False) # FIXME: unsure about it
session.commit()
return S_OK(res)
except exc.SQLAlchemyError as e:
session.rollback()
self.log.exception("delete: unexpected exception", lException=e)
return S_ERROR("delete: unexpected exception %s" % e)
finally:
session.close()
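# Usage sketch (illustrative; the DB path, table and column names below are
# hypothetical, and concrete subclasses normally define the mapped tables):
#
#     db = SQLAlchemyDB()
#     db._initializeConnection('ResourceStatus/ResourceStatusDB')
#     res = db.insert('SomeTable', {'name': 'site1', 'status': 'Active'})
#     if not res['OK']:
#         gLogger.error(res['Message'])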
|
chaen/DIRAC
|
Core/Base/SQLAlchemyDB.py
|
Python
|
gpl-3.0
| 10,709
|
[
"DIRAC"
] |
f39e8958f413808121ea6f55e02058d74a2e63775d99e992ad375d4538c8277f
|
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El
import time
m = 1000
n = 2000
display = True
worldRank = El.mpi.WorldRank()
def Rectang(height,width):
A = El.DistMatrix()
El.Uniform( A, height, width )
return A
A = Rectang(m,n)
b = El.DistMatrix()
El.Gaussian( b, m, 1 )
if display:
El.Display( A, "A" )
El.Display( b, "b" )
ctrl = El.LPDirectCtrl_d()
ctrl.mehrotraCtrl.progress = True
startBP = time.clock()
x = El.BP( A, b, ctrl )  # basis pursuit: min ||x||_1 subject to A x = b
endBP = time.clock()
if worldRank == 0:
print "BP time: ", endBP-startBP
if display:
El.Display( x, "x" )
xOneNorm = El.EntrywiseNorm( x, 1 )
e = El.DistMatrix()
El.Copy( b, e )
El.Gemv( El.NORMAL, -1., A, x, 1., e )
if display:
El.Display( e, "e" )
eTwoNorm = El.Nrm2( e )
if worldRank == 0:
print "|| x ||_1 =", xOneNorm
print "|| A x - b ||_2 =", eTwoNorm
# Require the user to press a button before the figures are closed
commSize = El.mpi.Size( El.mpi.COMM_WORLD() )
El.Finalize()
if commSize == 1:
raw_input('Press Enter to exit')
|
sg0/Elemental
|
examples/interface/BPDense.py
|
Python
|
bsd-3-clause
| 1,231
|
[
"Gaussian"
] |
8cdc3e6405de84d01334a0d6a80fbbec140fb1497ed26fe624b2ed9ead428d79
|
"""Pretty-printing (pprint()), the 'Print' Op, debugprint() and pydotprint().
They all allow different ways to print a graph, or to print the result
of an Op in a graph (Print Op).
"""
from __future__ import print_function
from copy import copy
import logging
import os
import sys
import warnings
import hashlib
import numpy as np
from six import string_types, integer_types, iteritems
import theano
from theano import gof
from theano import config
from six.moves import StringIO, reduce
from theano.gof import Op, Apply
from theano.compile import Function, debugmode, SharedVariable
from theano.compile.profilemode import ProfileMode
pydot_imported = False
try:
# pydot-ng is a fork of pydot that is better maintained
import pydot_ng as pd
if pd.find_graphviz():
pydot_imported = True
except ImportError:
try:
# fall back on pydot if necessary
import pydot as pd
if pd.find_graphviz():
pydot_imported = True
except ImportError:
pass # tests should not fail on optional dependency
_logger = logging.getLogger("theano.printing")
VALID_ASSOC = set(['left', 'right', 'either'])
def debugprint(obj, depth=-1, print_type=False,
file=None, ids='CHAR', stop_on_name=False,
done=None, print_storage=False):
"""Print a computation graph as text to stdout or a file.
:type obj: Variable, Apply, or Function instance
:param obj: symbolic thing to print
:type depth: integer
:param depth: print graph to this depth (-1 for unlimited)
:type print_type: boolean
:param print_type: whether to print the type of printed objects
:type file: None, 'str', or file-like object
:param file: print to this file ('str' means to return a string)
:type ids: str
:param ids: How do we print the identifier of the variable
id - print the python id value
int - print integer character
CHAR - print capital character
"" - don't print an identifier
:param stop_on_name: When True, if a node in the graph has a name,
we don't print anything below it.
:type done: None or dict
:param done: A dict where we store the ids of printed node.
Useful to have multiple call to debugprint share the same ids.
:type print_storage: bool
:param print_storage: If True, this will print the storage map
for Theano functions. Combined with allow_gc=False, after the
execution of a Theano function, we see the intermediate result.
:returns: string if `file` == 'str', else file arg
Each line printed represents a Variable in the graph.
The indentation of lines corresponds to its depth in the symbolic graph.
The first part of the text identifies whether it is an input
(if a name or type is printed) or the output of some Apply (in which case
the Op is printed).
The second part of the text is an identifier of the Variable.
If print_type is True, we add a part containing the type of the Variable
If a Variable is encountered multiple times in the depth-first search,
it is only printed recursively the first time. Later, just the Variable
identifier is printed.
If an Apply has multiple outputs, then a '.N' suffix will be appended
to the Apply's identifier, to indicate which output a line corresponds to.
"""
if not isinstance(depth, int):
raise Exception("depth parameter must be an int")
if file == 'str':
_file = StringIO()
elif file is None:
_file = sys.stdout
else:
_file = file
if done is None:
done = dict()
results_to_print = []
profile_list = []
order = [] # Toposort
smap = [] # storage_map
if isinstance(obj, (list, tuple, set)):
lobj = obj
else:
lobj = [obj]
for obj in lobj:
if isinstance(obj, gof.Variable):
results_to_print.append(obj)
profile_list.append(None)
smap.append(None)
order.append(None)
elif isinstance(obj, gof.Apply):
results_to_print.extend(obj.outputs)
profile_list.extend([None for item in obj.outputs])
smap.extend([None for item in obj.outputs])
order.extend([None for item in obj.outputs])
elif isinstance(obj, Function):
results_to_print.extend(obj.maker.fgraph.outputs)
profile_list.extend(
[obj.profile for item in obj.maker.fgraph.outputs])
if print_storage:
smap.extend(
[obj.fn.storage_map for item in obj.maker.fgraph.outputs])
else:
smap.extend(
[None for item in obj.maker.fgraph.outputs])
topo = obj.maker.fgraph.toposort()
order.extend(
[topo for item in obj.maker.fgraph.outputs])
elif isinstance(obj, gof.FunctionGraph):
results_to_print.extend(obj.outputs)
profile_list.extend([getattr(obj, 'profile', None)
for item in obj.outputs])
smap.extend([getattr(obj, 'storage_map', None)
for item in obj.outputs])
topo = obj.toposort()
order.extend([topo for item in obj.outputs])
elif isinstance(obj, (integer_types, float, np.ndarray)):
print(obj)
elif isinstance(obj, (theano.In, theano.Out)):
results_to_print.append(obj.variable)
profile_list.append(None)
smap.append(None)
order.append(None)
else:
raise TypeError("debugprint cannot print an object of this type",
obj)
scan_ops = []
if any([p for p in profile_list if p is not None and p.fct_callcount > 0]):
print("""
Timing Info
-----------
--> <time> <% time> - <total time> <% total time>
<time> computation time for this node
<% time> fraction of total computation time for this node
<total time> time for this node + total times for this node's ancestors
<% total time> total time for this node over total computation time
N.B.:
* Times include the node time and the function overhead.
* <total time> and <% total time> may over-count computation times
if inputs to a node share a common ancestor and should be viewed as a
loose upper bound. Their intended use is to help rule out potential nodes
to remove when optimizing a graph because their <total time> is very low.
""", file=_file)
for r, p, s, o in zip(results_to_print, profile_list, smap, order):
# Add the parent scan op to the list as well
if (hasattr(r.owner, 'op') and
isinstance(r.owner.op, theano.scan_module.scan_op.Scan)):
scan_ops.append(r)
debugmode.debugprint(r, depth=depth, done=done, print_type=print_type,
file=_file, order=o, ids=ids,
scan_ops=scan_ops, stop_on_name=stop_on_name,
profile=p, smap=s)
if len(scan_ops) > 0:
print("", file=_file)
new_prefix = ' >'
new_prefix_child = ' >'
print("Inner graphs of the scan ops:", file=_file)
for s in scan_ops:
# prepare a dict which maps the scan op's inner inputs
# to its outer inputs.
if hasattr(s.owner.op, 'fn'):
# If the op was compiled, print the optimized version.
inner_inputs = s.owner.op.fn.maker.fgraph.inputs
else:
inner_inputs = s.owner.op.inputs
outer_inputs = s.owner.inputs
inner_to_outer_inputs = \
dict([(inner_inputs[i], outer_inputs[o])
for i, o in
s.owner.op.var_mappings['outer_inp_from_inner_inp']
.items()])
print("", file=_file)
debugmode.debugprint(
s, depth=depth, done=done,
print_type=print_type,
file=_file, ids=ids,
scan_ops=scan_ops,
stop_on_name=stop_on_name,
scan_inner_to_outer_inputs=inner_to_outer_inputs)
if hasattr(s.owner.op, 'fn'):
# If the op was compiled, print the optimized version.
outputs = s.owner.op.fn.maker.fgraph.outputs
else:
outputs = s.owner.op.outputs
for idx, i in enumerate(outputs):
if hasattr(i, 'owner') and hasattr(i.owner, 'op'):
if isinstance(i.owner.op, theano.scan_module.scan_op.Scan):
scan_ops.append(i)
debugmode.debugprint(
r=i, prefix=new_prefix,
depth=depth, done=done,
print_type=print_type, file=_file,
ids=ids, stop_on_name=stop_on_name,
prefix_child=new_prefix_child,
scan_ops=scan_ops,
scan_inner_to_outer_inputs=inner_to_outer_inputs)
if file is _file:
return file
elif file == 'str':
return _file.getvalue()
else:
_file.flush()
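# Usage sketch (illustrative): debugprint renders a graph as an indented
# tree, one line per Variable/Apply.
#
#     import theano.tensor as T
#     x = T.dmatrix('x')
#     debugprint((x + x) * 2)            # prints the graph to stdout
#     s = debugprint(x * 2, file='str')  # file='str' returns the text instead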
def _print_fn(op, xin):
for attr in op.attrs:
temp = getattr(xin, attr)
if callable(temp):
pmsg = temp()
else:
pmsg = temp
print(op.message, attr, '=', pmsg)
class Print(Op):
""" This identity-like Op print as a side effect.
This identity-like Op has the side effect of printing a message
followed by its inputs when it runs. Default behaviour is to print
the __str__ representation. Optionally, one can pass a list of the
input member functions to execute, or attributes to print.
@type message: String
@param message: string to prepend to the output
@type attrs: list of Strings
@param attrs: list of input node attributes or member functions to print.
Functions are identified through callable(), executed and
their return value printed.
:note: WARNING. This can disable some optimizations!
(speed and/or stabilization)
Detailed explanation:
As of 2012-06-21 the Print op is not known by any optimization.
Setting a Print op in the middle of a pattern that is usually
optimized out will block the optimization. for example, log(1+x)
optimizes to log1p(x) but log(1+Print(x)) is unaffected by
optimizations.
"""
view_map = {0: [0]}
__props__ = ('message', 'attrs', 'global_fn')
def __init__(self, message="", attrs=("__str__",), global_fn=_print_fn):
self.message = message
self.attrs = tuple(attrs) # attrs should be a hashable iterable
self.global_fn = global_fn
def make_node(self, xin):
xout = xin.type.make_variable()
return Apply(op=self, inputs=[xin], outputs=[xout])
def perform(self, node, inputs, output_storage):
xin, = inputs
xout, = output_storage
xout[0] = xin
self.global_fn(self, xin)
def grad(self, input, output_gradients):
return output_gradients
def R_op(self, inputs, eval_points):
return [x for x in eval_points]
def __setstate__(self, dct):
dct.setdefault('global_fn', _print_fn)
self.__dict__.update(dct)
def c_code_cache_version(self):
return (1,)
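# Usage sketch (illustrative): Print behaves like an identity with a printing
# side effect, so it can be spliced into an existing graph.
#
#     import theano
#     import theano.tensor as T
#     x = T.dvector('x')
#     x_printed = Print('x value:')(x)
#     f = theano.function([x], x_printed * 2)
#     f([1., 2.])   # side effect: prints "x value: __str__ = [ 1.  2.]"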
class PrinterState(gof.utils.scratchpad):
def __init__(self, props=None, **more_props):
if props is None:
props = {}
if isinstance(props, gof.utils.scratchpad):
self.__update__(props)
else:
self.__dict__.update(props)
self.__dict__.update(more_props)
def clone(self, props=None, **more_props):
if props is None:
props = {}
return PrinterState(self, **dict(props, **more_props))
class OperatorPrinter:
def __init__(self, operator, precedence, assoc='left'):
self.operator = operator
self.precedence = precedence
self.assoc = assoc
assert self.assoc in VALID_ASSOC
def process(self, output, pstate):
pprinter = pstate.pprinter
node = output.owner
if node is None:
raise TypeError("operator %s cannot represent a variable that is "
"not the result of an operation" % self.operator)
# Precedence seems to be buggy, see #249
# So, in doubt, we parenthesize everything.
# outer_precedence = getattr(pstate, 'precedence', -999999)
# outer_assoc = getattr(pstate, 'assoc', 'none')
# if outer_precedence > self.precedence:
# parenthesize = True
# else:
# parenthesize = False
parenthesize = True
input_strings = []
max_i = len(node.inputs) - 1
for i, input in enumerate(node.inputs):
if (self.assoc == 'left' and i != 0 or self.assoc == 'right' and
i != max_i):
s = pprinter.process(input, pstate.clone(
precedence=self.precedence + 1e-6))
else:
s = pprinter.process(input, pstate.clone(
precedence=self.precedence))
input_strings.append(s)
if len(input_strings) == 1:
s = self.operator + input_strings[0]
else:
s = (" %s " % self.operator).join(input_strings)
if parenthesize:
return "(%s)" % s
else:
return s
class PatternPrinter:
def __init__(self, *patterns):
self.patterns = []
for pattern in patterns:
if isinstance(pattern, string_types):
self.patterns.append((pattern, ()))
else:
self.patterns.append((pattern[0], pattern[1:]))
def process(self, output, pstate):
pprinter = pstate.pprinter
node = output.owner
if node is None:
raise TypeError("Patterns %s cannot represent a variable that is "
"not the result of an operation" % self.patterns)
idx = node.outputs.index(output)
pattern, precedences = self.patterns[idx]
precedences += (1000,) * len(node.inputs)
def pp_process(input, precedence):
return pprinter.process(input, pstate.clone(precedence=precedence))
d = dict((str(i), x)
for i, x in enumerate(pp_process(input, precedence)
for input, precedence in
zip(node.inputs, precedences)))
return pattern % d
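# Illustrative note: a pattern string uses '%(i)s' placeholders, which are
# substituted with the pretty-printed i-th input of the node. For example,
# PatternPrinter('%(0)s + %(1)s') would render a two-input node as
# '<input0> + <input1>'.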
class FunctionPrinter:
def __init__(self, *names):
self.names = names
def process(self, output, pstate):
pprinter = pstate.pprinter
node = output.owner
if node is None:
raise TypeError("function %s cannot represent a variable that is "
"not the result of an operation" % self.names)
idx = node.outputs.index(output)
name = self.names[idx]
return "%s(%s)" % (name, ", ".join(
[pprinter.process(input, pstate.clone(precedence=-1000))
for input in node.inputs]))
class MemberPrinter:
def __init__(self, *names):
self.names = names
def process(self, output, pstate):
pprinter = pstate.pprinter
node = output.owner
if node is None:
raise TypeError("function %s cannot represent a variable that is"
" not the result of an operation" % self.function)
idx = node.outputs.index(output)
name = self.names[idx]
input = node.inputs[0]
return "%s.%s" % (pprinter.process(input,
pstate.clone(precedence=1000)),
name)
class IgnorePrinter:
def process(self, output, pstate):
pprinter = pstate.pprinter
node = output.owner
if node is None:
raise TypeError("function %s cannot represent a variable that is"
" not the result of an operation" % self.function)
input = node.inputs[0]
return "%s" % pprinter.process(input, pstate)
class DefaultPrinter:
def __init__(self):
pass
def process(self, r, pstate):
pprinter = pstate.pprinter
node = r.owner
if node is None:
return LeafPrinter().process(r, pstate)
return "%s(%s)" % (str(node.op), ", ".join(
[pprinter.process(input, pstate.clone(precedence=-1000))
for input in node.inputs]))
class LeafPrinter:
def process(self, r, pstate):
if r.name in greek:
return greek[r.name]
else:
return str(r)
class PPrinter:
def __init__(self):
self.printers = []
def assign(self, condition, printer):
if isinstance(condition, gof.Op):
op = condition
condition = (lambda pstate, r: r.owner is not None and
r.owner.op == op)
self.printers.insert(0, (condition, printer))
def process(self, r, pstate=None):
if pstate is None:
pstate = PrinterState(pprinter=self)
elif isinstance(pstate, dict):
pstate = PrinterState(pprinter=self, **pstate)
for condition, printer in self.printers:
if condition(pstate, r):
return printer.process(r, pstate)
def clone(self):
cp = copy(self)
cp.printers = list(self.printers)
return cp
def clone_assign(self, condition, printer):
cp = self.clone()
cp.assign(condition, printer)
return cp
def process_graph(self, inputs, outputs, updates=None,
display_inputs=False):
if updates is None:
updates = {}
if not isinstance(inputs, (list, tuple)):
inputs = [inputs]
if not isinstance(outputs, (list, tuple)):
outputs = [outputs]
current = None
if display_inputs:
strings = [(0, "inputs: " + ", ".join(
map(str, list(inputs) + list(updates.keys()))))]
else:
strings = []
pprinter = self.clone_assign(lambda pstate, r: r.name is not None and
r is not current, LeafPrinter())
inv_updates = dict((b, a) for (a, b) in iteritems(updates))
i = 1
for node in gof.graph.io_toposort(list(inputs) + list(updates.keys()),
list(outputs) +
list(updates.values())):
for output in node.outputs:
if output in inv_updates:
name = str(inv_updates[output])
strings.append((i + 1000, "%s <- %s" % (
name, pprinter.process(output))))
i += 1
if output.name is not None or output in outputs:
if output.name is None:
name = 'out[%i]' % outputs.index(output)
else:
name = output.name
# backport
# name = 'out[%i]' % outputs.index(output) if output.name
# is None else output.name
current = output
try:
idx = 2000 + outputs.index(output)
except ValueError:
idx = i
if len(outputs) == 1 and outputs[0] is output:
strings.append((idx, "return %s" %
pprinter.process(output)))
else:
strings.append((idx, "%s = %s" %
(name, pprinter.process(output))))
i += 1
strings.sort()
return "\n".join(s[1] for s in strings)
def __call__(self, *args):
if len(args) == 1:
return self.process(*args)
elif len(args) == 2 and isinstance(args[1], (PrinterState, dict)):
return self.process(*args)
elif len(args) > 2:
return self.process_graph(*args)
else:
raise TypeError('Not enough arguments to call.')
use_ascii = True
if use_ascii:
special = dict(middle_dot="\\dot",
big_sigma="\\Sigma")
greek = dict(alpha="\\alpha",
beta="\\beta",
gamma="\\gamma",
delta="\\delta",
epsilon="\\epsilon")
else:
special = dict(middle_dot=u"\u00B7",
big_sigma=u"\u03A3")
greek = dict(alpha=u"\u03B1",
beta=u"\u03B2",
gamma=u"\u03B3",
delta=u"\u03B4",
epsilon=u"\u03B5")
pprint = PPrinter()
pprint.assign(lambda pstate, r: True, DefaultPrinter())
pprint.assign(lambda pstate, r: hasattr(pstate, 'target') and
pstate.target is not r and r.name is not None,
LeafPrinter())
pp = pprint
"""
Print to the terminal a math-like expression.
"""
# colors not used: orange, amber#FFBF00, purple, pink,
# used by default: green, blue, grey, red
default_colorCodes = {'GpuFromHost': 'red',
'HostFromGpu': 'red',
'Scan': 'yellow',
'Shape': 'brown',
'IfElse': 'magenta',
'Elemwise': '#FFAABB', # dark pink
'Subtensor': '#FFAAFF', # purple
'Alloc': '#FFAA22', # orange
'Output': 'blue'}
def pydotprint(fct, outfile=None,
compact=True, format='png', with_ids=False,
high_contrast=True, cond_highlight=None, colorCodes=None,
max_label_size=70, scan_graphs=False,
var_with_name_simple=False,
print_output_file=True,
return_image=False,
):
"""Print to a file the graph of a compiled theano function's ops. Supports
all pydot output formats, including png and svg.
:param fct: a compiled Theano function, a Variable, an Apply or
a list of Variable.
:param outfile: the output file where to put the graph.
:param compact: if True, will remove intermediate variables that don't have a name.
:param format: the file format of the output.
:param with_ids: Print the toposort index of the node in the node name,
and an index number in the variable ellipse.
:param high_contrast: if true, each node is filled with its
corresponding color, instead of only coloring
the border
:param colorCodes: dictionary with names of ops as keys and colors as
values
:param cond_highlight: Highlights a lazy if by surrounding each of the 3
possible categories of ops with a border. The categories
are: ops that are on the left branch, ops that are on the
right branch, and ops that are on both branches.
As an alternative you can provide the node that represents
the lazy if.
:param scan_graphs: if true, the inner graph of each scan op is plotted
to a separate file, named after the main output file
with the scan op's name and its toposort index appended.
This index can be printed with the option with_ids.
:param var_with_name_simple: If true and a variable has a name,
we will print only the variable name.
Otherwise, we concatenate the type to the var name.
:param return_image: If True, it will create the image and return it.
Useful to display the image in ipython notebook.
.. code-block:: python
import theano
v = theano.tensor.vector()
from IPython.display import SVG
SVG(theano.printing.pydotprint(v*2, return_image=True,
format='svg'))
In the graph, ellipses are Apply Nodes (the execution of an op)
and boxes are variables. If variables have names they are used as
text (if multiple vars have the same name, they will be merged in
the graph). Otherwise, if the variable is constant, we print its
value and finally we print the type + a unique number to prevent
multiple vars from being merged. We print the op of the apply in
the Apply box with a number that represents the toposort order of
application of those Apply. If an Apply has more than 1 input, we
label each edge between an input and the Apply node with the
input's index.
Variable color code::
- Cyan boxes are SharedVariables (inputs and/or outputs of the graph),
- Green boxes are input variables to the graph,
- Blue boxes are output variables of the graph,
- Grey boxes are variables that are not outputs and are not used,
Default apply node code::
- Red ellipses are transfers from/to the gpu
- Yellow are scan nodes
- Brown are shape nodes
- Magenta are IfElse nodes
- Dark pink are elemwise nodes
- Purple are subtensor nodes
- Orange are alloc nodes
For edges, they are black by default. If a node returns a view
of an input, we put the corresponding input edge in blue. If it
returns a destroyed input, we put the corresponding edge in red.
.. note::
Since October 20th, 2014, this prints the inner function of each
scan op separately after the top-level output.
"""
if colorCodes is None:
colorCodes = default_colorCodes
if outfile is None:
outfile = os.path.join(config.compiledir, 'theano.pydotprint.' +
config.device + '.' + format)
if isinstance(fct, Function):
mode = fct.maker.mode
profile = getattr(fct, "profile", None)
if (not isinstance(mode, ProfileMode) or
fct not in mode.profile_stats):
mode = None
outputs = fct.maker.fgraph.outputs
topo = fct.maker.fgraph.toposort()
elif isinstance(fct, gof.FunctionGraph):
mode = None
profile = None
outputs = fct.outputs
topo = fct.toposort()
else:
if isinstance(fct, gof.Variable):
fct = [fct]
elif isinstance(fct, gof.Apply):
fct = fct.outputs
assert isinstance(fct, (list, tuple))
assert all(isinstance(v, gof.Variable) for v in fct)
fct = gof.FunctionGraph(inputs=gof.graph.inputs(fct),
outputs=fct)
mode = None
profile = None
outputs = fct.outputs
topo = fct.toposort()
if not pydot_imported:
raise RuntimeError("Failed to import pydot. You must install pydot"
" and graphviz for `pydotprint` to work.")
g = pd.Dot()
if cond_highlight is not None:
c1 = pd.Cluster('Left')
c2 = pd.Cluster('Right')
c3 = pd.Cluster('Middle')
cond = None
for node in topo:
if (node.op.__class__.__name__ == 'IfElse' and
node.op.name == cond_highlight):
cond = node
if cond is None:
_logger.warn("pydotprint: cond_highlight is set but there is no"
" IfElse node in the graph")
cond_highlight = None
if cond_highlight is not None:
def recursive_pass(x, ls):
if not x.owner:
return ls
else:
ls += [x.owner]
for inp in x.inputs:
ls += recursive_pass(inp, ls)
return ls
left = set(recursive_pass(cond.inputs[1], []))
right = set(recursive_pass(cond.inputs[2], []))
middle = left.intersection(right)
left = left.difference(middle)
right = right.difference(middle)
middle = list(middle)
left = list(left)
right = list(right)
var_str = {}
var_id = {}
all_strings = set()
def var_name(var):
if var in var_str:
return var_str[var], var_id[var]
if var.name is not None:
if var_with_name_simple:
varstr = var.name
else:
varstr = 'name=' + var.name + " " + str(var.type)
elif isinstance(var, gof.Constant):
dstr = 'val=' + str(np.asarray(var.data))
if '\n' in dstr:
dstr = dstr[:dstr.index('\n')]
varstr = '%s %s' % (dstr, str(var.type))
elif (var in input_update and
input_update[var].name is not None):
varstr = input_update[var].name
if not var_with_name_simple:
varstr += str(var.type)
else:
# a var id is needed as otherwise var with the same type will be
# merged in the graph.
varstr = str(var.type)
if len(varstr) > max_label_size:
varstr = varstr[:max_label_size - 3] + '...'
var_str[var] = varstr
var_id[var] = str(id(var))
all_strings.add(varstr)
return varstr, var_id[var]
apply_name_cache = {}
apply_name_id = {}
def apply_name(node):
if node in apply_name_cache:
return apply_name_cache[node], apply_name_id[node]
prof_str = ''
if mode:
time = mode.profile_stats[fct].apply_time.get(node, 0)
# second, % total time in profiler, %fct time in profiler
if mode.local_time == 0:
pt = 0
else:
pt = time * 100 / mode.local_time
if mode.profile_stats[fct].fct_callcount == 0:
pf = 0
else:
pf = time * 100 / mode.profile_stats[fct].fct_call_time
prof_str = ' (%.3fs,%.3f%%,%.3f%%)' % (time, pt, pf)
elif profile:
time = profile.apply_time.get(node, 0)
# second, %fct time in profiler
if profile.fct_callcount == 0:
pf = 0
else:
pf = time * 100 / profile.fct_call_time
prof_str = ' (%.3fs,%.3f%%)' % (time, pf)
applystr = str(node.op).replace(':', '_')
applystr += prof_str
if (applystr in all_strings) or with_ids:
idx = ' id=' + str(topo.index(node))
if len(applystr) + len(idx) > max_label_size:
applystr = (applystr[:max_label_size - 3 - len(idx)] + idx +
'...')
else:
applystr = applystr + idx
elif len(applystr) > max_label_size:
applystr = applystr[:max_label_size - 3] + '...'
idx = 1
while applystr in all_strings:
idx += 1
suffix = ' id=' + str(idx)
applystr = (applystr[:max_label_size - 3 - len(suffix)] +
'...' +
suffix)
all_strings.add(applystr)
apply_name_cache[node] = applystr
apply_name_id[node] = str(id(node))
return applystr, apply_name_id[node]
# Update the inputs that have an update function
input_update = {}
reverse_input_update = {}
# Here outputs can be the original list, as we should not change
# it, we must copy it.
outputs = list(outputs)
if isinstance(fct, Function):
function_inputs = zip(fct.maker.expanded_inputs, fct.maker.fgraph.inputs)
for i, fg_ii in reversed(list(function_inputs)):
if i.update is not None:
k = outputs.pop()
# Use the fgraph.inputs as it isn't the same as maker.inputs
input_update[k] = fg_ii
reverse_input_update[fg_ii] = k
apply_shape = 'ellipse'
var_shape = 'box'
for node_idx, node in enumerate(topo):
astr, aid = apply_name(node)
use_color = None
for opName, color in iteritems(colorCodes):
if opName in node.op.__class__.__name__:
use_color = color
if use_color is None:
nw_node = pd.Node(aid, label=astr, shape=apply_shape)
elif high_contrast:
nw_node = pd.Node(aid, label=astr, style='filled',
fillcolor=use_color,
shape=apply_shape)
else:
nw_node = pd.Node(aid, label=astr,
color=use_color, shape=apply_shape)
g.add_node(nw_node)
if cond_highlight:
if node in middle:
c3.add_node(nw_node)
elif node in left:
c1.add_node(nw_node)
elif node in right:
c2.add_node(nw_node)
for idx, var in enumerate(node.inputs):
varstr, varid = var_name(var)
label = ""
if len(node.inputs) > 1:
label = str(idx)
param = {}
if label:
param['label'] = label
if hasattr(node.op, 'view_map') and idx in reduce(
list.__add__, node.op.view_map.values(), []):
param['color'] = colorCodes['Output']
elif hasattr(node.op, 'destroy_map') and idx in reduce(
list.__add__, node.op.destroy_map.values(), []):
param['color'] = 'red'
if var.owner is None:
color = 'green'
if isinstance(var, SharedVariable):
# Inputs are green, outputs blue
# Mixing blue and green gives cyan! (input and output var)
color = "cyan"
if high_contrast:
g.add_node(pd.Node(varid,
style='filled',
fillcolor=color,
label=varstr,
shape=var_shape))
else:
g.add_node(pd.Node(varid,
color=color,
label=varstr,
shape=var_shape))
g.add_edge(pd.Edge(varid, aid, **param))
elif var.name or not compact or var in outputs:
g.add_edge(pd.Edge(varid, aid, **param))
else:
# no name, so we don't make a var ellipse
if label:
label += " "
label += str(var.type)
if len(label) > max_label_size:
label = label[:max_label_size - 3] + '...'
param['label'] = label
g.add_edge(pd.Edge(apply_name(var.owner)[1], aid, **param))
for idx, var in enumerate(node.outputs):
varstr, varid = var_name(var)
out = var in outputs
label = ""
if len(node.outputs) > 1:
label = str(idx)
if len(label) > max_label_size:
label = label[:max_label_size - 3] + '...'
param = {}
if label:
param['label'] = label
if out or var in input_update:
g.add_edge(pd.Edge(aid, varid, **param))
if high_contrast:
g.add_node(pd.Node(varid, style='filled',
label=varstr,
fillcolor=colorCodes['Output'], shape=var_shape))
else:
g.add_node(pd.Node(varid, color=colorCodes['Output'],
label=varstr,
shape=var_shape))
elif len(var.clients) == 0:
g.add_edge(pd.Edge(aid, varid, **param))
# grey means that the output var isn't used
if high_contrast:
g.add_node(pd.Node(varid, style='filled',
label=varstr,
fillcolor='grey', shape=var_shape))
else:
g.add_node(pd.Node(varid, label=varstr,
color='grey', shape=var_shape))
elif var.name or not compact:
if compact:
if label:
label += " "
label += str(var.type)
if len(label) > max_label_size:
label = label[:max_label_size - 3] + '...'
param['label'] = label
g.add_edge(pd.Edge(aid, varid, **param))
g.add_node(pd.Node(varid, shape=var_shape, label=varstr))
# else:
# don't add edge here as it is already added from the inputs.
# The var that represent updates, must be linked to the input var.
for sha, up in input_update.items():
_, shaid = var_name(sha)
_, upid = var_name(up)
g.add_edge(pd.Edge(shaid, upid, label="UPDATE", color=colorCodes['Output']))
if cond_highlight:
g.add_subgraph(c1)
g.add_subgraph(c2)
g.add_subgraph(c3)
if not outfile.endswith('.' + format):
outfile += '.' + format
if scan_graphs:
scan_ops = [(idx, x) for idx, x in enumerate(topo)
if isinstance(x.op, theano.scan_module.scan_op.Scan)]
path, fn = os.path.split(outfile)
basename = '.'.join(fn.split('.')[:-1])
# Safe way of doing things: a file name may contain multiple '.'
ext = fn[len(basename):]
for idx, scan_op in scan_ops:
# is there a chance that name is not defined?
if hasattr(scan_op.op, 'name'):
new_name = basename + '_' + scan_op.op.name + '_' + str(idx)
else:
new_name = basename + '_' + str(idx)
new_name = os.path.join(path, new_name + ext)
if hasattr(scan_op.op, 'fn'):
to_print = scan_op.op.fn
else:
to_print = scan_op.op.outputs
pydotprint(to_print, new_name, compact, format, with_ids,
high_contrast, cond_highlight, colorCodes,
max_label_size, scan_graphs)
if return_image:
return g.create(prog='dot', format=format)
else:
try:
g.write(outfile, prog='dot', format=format)
except pd.InvocationException:
# based on https://github.com/Theano/Theano/issues/2988
version = getattr(pd, '__version__', "")
if version and [int(n) for n in version.split(".")] < [1, 0, 28]:
raise Exception("Old version of pydot detected, which can "
"cause issues with pydot printing. Try "
"upgrading pydot version to a newer one")
raise
if print_output_file:
print('The output file is available at', outfile)
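# Minimal usage sketch for pydotprint (requires pydot and graphviz to be
# installed; names below are illustrative):
#
# import theano
# import theano.tensor as T
# x = T.dvector('x')
# f = theano.function([x], x * 2 + 1)
# theano.printing.pydotprint(f, outfile='f_graph.png') # writes f_graph.png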
def pydotprint_variables(vars,
outfile=None,
format='png',
depth=-1,
high_contrast=True, colorCodes=None,
max_label_size=50,
var_with_name_simple=False):
'''DEPRECATED: use pydotprint() instead.
Identical to pydotprint, except that it starts from a variable
instead of a compiled function.
'''
warnings.warn("pydotprint_variables() is deprecated."
" Use pydotprint() instead.")
if colorCodes is None:
colorCodes = default_colorCodes
if outfile is None:
outfile = os.path.join(config.compiledir, 'theano.pydotprint.' +
config.device + '.' + format)
if not pydot_imported:
raise RuntimeError("Failed to import pydot. You must install pydot"
" and graphviz for `pydotprint_variables` to work.")
if pd.__name__ == "pydot_ng":
raise RuntimeError("pydotprint_variables do not support pydot_ng."
"pydotprint_variables is also deprecated, "
"use pydotprint() that support pydot_ng")
g = pd.Dot()
my_list = {}
orphans = []
if type(vars) not in (list, tuple):
vars = [vars]
var_str = {}
def var_name(var):
if var in var_str:
return var_str[var]
if var.name is not None:
if var_with_name_simple:
varstr = var.name
else:
varstr = 'name=' + var.name + " " + str(var.type)
elif isinstance(var, gof.Constant):
dstr = 'val=' + str(var.data)
if '\n' in dstr:
dstr = dstr[:dstr.index('\n')]
varstr = '%s %s' % (dstr, str(var.type))
else:
# a var id is needed as otherwise var with the same type will be
# merged in the graph.
varstr = str(var.type)
varstr += ' ' + str(len(var_str))
if len(varstr) > max_label_size:
varstr = varstr[:max_label_size - 3] + '...'
var_str[var] = varstr
return varstr
def apply_name(node):
name = str(node.op).replace(':', '_')
if len(name) > max_label_size:
name = name[:max_label_size - 3] + '...'
return name
def plot_apply(app, d):
if d == 0:
return
if app in my_list:
return
astr = apply_name(app) + '_' + str(len(my_list.keys()))
if len(astr) > max_label_size:
astr = astr[:max_label_size - 3] + '...'
my_list[app] = astr
use_color = None
for opName, color in iteritems(colorCodes):
if opName in app.op.__class__.__name__:
use_color = color
if use_color is None:
g.add_node(pd.Node(astr, shape='box'))
elif high_contrast:
g.add_node(pd.Node(astr, style='filled', fillcolor=use_color,
shape='box'))
else:
g.add_node(pd.Node(astr, color=use_color, shape='box'))
for i, nd in enumerate(app.inputs):
if nd not in my_list:
varastr = var_name(nd) + '_' + str(len(my_list.keys()))
if len(varastr) > max_label_size:
varastr = varastr[:max_label_size - 3] + '...'
my_list[nd] = varastr
if nd.owner is not None:
g.add_node(pd.Node(varastr))
elif high_contrast:
g.add_node(pd.Node(varastr, style='filled',
fillcolor='green'))
else:
g.add_node(pd.Node(varastr, color='green'))
else:
varastr = my_list[nd]
label = None
if len(app.inputs) > 1:
label = str(i)
g.add_edge(pd.Edge(varastr, astr, label=label))
for i, nd in enumerate(app.outputs):
if nd not in my_list:
varastr = var_name(nd) + '_' + str(len(my_list.keys()))
if len(varastr) > max_label_size:
varastr = varastr[:max_label_size - 3] + '...'
my_list[nd] = varastr
color = None
if nd in vars:
color = colorCodes['Output']
elif nd in orphans:
color = 'gray'
if color is None:
g.add_node(pd.Node(varastr))
elif high_contrast:
g.add_node(pd.Node(varastr, style='filled',
fillcolor=color))
else:
g.add_node(pd.Node(varastr, color=color))
else:
varastr = my_list[nd]
label = None
if len(app.outputs) > 1:
label = str(i)
g.add_edge(pd.Edge(astr, varastr, label=label))
for nd in app.inputs:
if nd.owner:
plot_apply(nd.owner, d - 1)
for nd in vars:
if nd.owner:
for k in nd.owner.outputs:
if k not in vars:
orphans.append(k)
for nd in vars:
if nd.owner:
plot_apply(nd.owner, depth)
try:
g.write(outfile, prog='dot', format=format)
except pd.InvocationException as e:
# Some versions of pydot are buggy or don't work correctly with
# empty labels. Provide a better user error message.
version = getattr(pd, '__version__', "")
msg = str(e)
if version == "1.0.28" and "label=]" in msg:
raise Exception("pydot 1.0.28 is known to be bugged. Use another "
"working version of pydot")
elif "label=]" in msg:
raise Exception("Your version of pydot " + version +
" returned an error. Version 1.0.28 is known"
" to be bugged and 1.0.25 to be working with"
" Theano. Using another version of pydot could"
" fix this problem. The pydot error is: " + msg)
raise
print('The output file is available at', outfile)
class _TagGenerator:
""" Class for giving abbreviated tags like to objects.
Only really intended for internal use in order to
implement min_informative_st """
def __init__(self):
self.cur_tag_number = 0
def get_tag(self):
rval = debugmode.char_from_number(self.cur_tag_number)
self.cur_tag_number += 1
return rval
def min_informative_str(obj, indent_level=0,
_prev_obs=None, _tag_generator=None):
"""
Returns a string specifying to the user what obj is
The string will print out as much of the graph as is needed
for the whole thing to be specified in terms only of constants
or named variables.
Parameters
----------
obj: the object to convert to a string
indent_level: the number of tabs the tree should start printing at
(nested levels of the tree will get more tabs)
_prev_obs: used only by recursive calls of min_informative_str;
a dictionary mapping previously converted
objects to short tags
Basic design philosophy
-----------------------
The idea behind this function is that it can be used as parts of
command line tools for debugging or for error messages. The
information displayed is intended to be concise and easily read by
a human. In particular, it is intended to be informative when
working with large graphs composed of subgraphs from several
different people's code, as in pylearn2.
Stopping expanding subtrees when named variables are encountered
makes it easier to understand what is happening when a graph
formed by composing several different graphs made by code written
by different authors has a bug.
An example output is:
A. Elemwise{add_no_inplace}
B. log_likelihood_v_given_h
C. log_likelihood_h
If the user is told they have a problem computing this value, it's
obvious that either log_likelihood_h or log_likelihood_v_given_h
has the wrong dimensionality. The variable's str object would only
tell you that there was a problem with an
Elemwise{add_no_inplace}. Since there are many such ops in a
typical graph, such an error message is considerably less
informative. Error messages based on this function should convey
much more information about the location in the graph of the error
while remaining succinct.
One final note: the use of capital letters to uniquely identify
nodes within the graph is motivated by legibility. I do not use
numbers or lower case letters since these are pretty common as
parts of names of ops, etc. I also don't use the object's id like
in debugprint because it gives such a long string that takes time
to visually diff.
"""
if _prev_obs is None:
_prev_obs = {}
indent = ' ' * indent_level
if id(obj) in _prev_obs:
tag = _prev_obs[id(obj)]
return indent + '<' + tag + '>'
if _tag_generator is None:
_tag_generator = _TagGenerator()
cur_tag = _tag_generator.get_tag()
_prev_obs[id(obj)] = cur_tag
if hasattr(obj, '__array__'):
name = '<ndarray>'
elif hasattr(obj, 'name') and obj.name is not None:
name = obj.name
elif hasattr(obj, 'owner') and obj.owner is not None:
name = str(obj.owner.op)
for ipt in obj.owner.inputs:
name += '\n'
name += min_informative_str(ipt,
indent_level=indent_level + 1,
_prev_obs=_prev_obs,
_tag_generator=_tag_generator)
else:
name = str(obj)
prefix = cur_tag + '. '
rval = indent + prefix + name
return rval
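# Illustrative sketch of min_informative_str (hypothetical variables; the
# letter tags depend on traversal order):
#
# import theano.tensor as T
# a = T.scalar('log_likelihood_v_given_h')
# b = T.scalar('log_likelihood_h')
# print(min_informative_str(a + b))
# A. Elemwise{add,no_inplace}
#  B. log_likelihood_v_given_h
#  C. log_likelihood_h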
def var_descriptor(obj, _prev_obs=None, _tag_generator=None):
"""
Returns a string, with no endlines, fully specifying
how a variable is computed. Does not include any memory
location dependent information such as the id of a node.
"""
if _prev_obs is None:
_prev_obs = {}
if id(obj) in _prev_obs:
tag = _prev_obs[id(obj)]
return '<' + tag + '>'
if _tag_generator is None:
_tag_generator = _TagGenerator()
cur_tag = _tag_generator.get_tag()
_prev_obs[id(obj)] = cur_tag
if hasattr(obj, '__array__'):
# hashlib hashes only the contents of the buffer, but
# it can have different semantics depending on the strides
# of the ndarray
name = '<ndarray:'
name += 'strides=[' + ','.join(str(stride)
for stride in obj.strides) + ']'
name += ',digest=' + hashlib.md5(obj).hexdigest() + '>'
elif hasattr(obj, 'owner') and obj.owner is not None:
name = str(obj.owner.op) + '('
name += ','.join(var_descriptor(ipt,
_prev_obs=_prev_obs,
_tag_generator=_tag_generator)
for ipt in obj.owner.inputs)
name += ')'
elif hasattr(obj, 'name') and obj.name is not None:
# Only print the name if there is no owner.
# This way adding a name to an intermediate node can't make
# a deeper graph get the same descriptor as a shallower one
name = obj.name
else:
name = str(obj)
if ' at 0x' in name:
# The __str__ method is encoding the object's id in its str
name = position_independent_str(obj)
if ' at 0x' in name:
print(name)
assert False
prefix = cur_tag + '='
rval = prefix + name
return rval
def position_independent_str(obj):
if isinstance(obj, theano.gof.graph.Variable):
rval = 'theano_var'
rval += '{type=' + str(obj.type) + '}'
else:
raise NotImplementedError()
return rval
def hex_digest(x):
"""
Returns a short, mostly hexadecimal hash of a numpy ndarray
"""
assert isinstance(x, np.ndarray)
rval = hashlib.md5(x.tostring()).hexdigest()
# hex digest must be annotated with strides to avoid collisions
# because the buffer interface only exposes the raw data, not
# any info about the semantics of how that data should be arranged
# into a tensor
rval = rval + '|strides=[' + ','.join(str(stride)
for stride in x.strides) + ']'
rval = rval + '|shape=[' + ','.join(str(s) for s in x.shape) + ']'
return rval
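# Example of the digest format (the hex prefix depends on the array contents;
# strides shown are those of a C-ordered float64 array):
#
# import numpy as np
# hex_digest(np.arange(6.).reshape(2, 3))
# -> '<32 hex chars>|strides=[24,8]|shape=[2,3]'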
|
rizar/attention-lvcsr
|
libs/Theano/theano/printing.py
|
Python
|
mit
| 52,435
|
[
"Amber"
] |
921847b78e36177d8685e1ba54fcbc78346e50f6ffcd2c7ee4965c95c440b175
|
"""
#This software was developed by the University of Tennessee as part of the
#Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
#project funded by the US National Science Foundation.
#See the license text in license.txt
"""
from __future__ import division
import numpy as np # type: ignore
from numpy import pi, cos, sin, sqrt # type: ignore
from . import resolution
from .resolution import Resolution
## Singular point
SIGMA_ZERO = 1.0e-010
## Limit of how many sigmas to be covered for the Gaussian smearing
# default: 3.0 to cover 99.7% of the Gaussian
NSIGMA = 3.0
## Defaults
NR = {'xhigh':10, 'high':5, 'med':5, 'low':3}
NPHI = {'xhigh':20, 'high':12, 'med':6, 'low':4}
## Defaults
N_SLIT_PERP = {'xhigh':1000, 'high':500, 'med':200, 'low':50}
N_SLIT_PERP_DOC = ", ".join("%s=%d"%(name, value)
for value, name in
sorted((2*v+1, k) for k, v in N_SLIT_PERP.items()))
class Pinhole2D(Resolution):
"""
Gaussian Q smearing class for SAS 2d data
"""
def __init__(self, data=None, index=None,
nsigma=NSIGMA, accuracy='Low', coords='polar'):
"""
Assumption: equally spaced bins in dq_r, dq_phi space.
:param data: 2d data used to set the smearing parameters
:param index: 1d array with len(data) to define the range
of the calculation: elements are given as True or False
:param nsigma: number of sigmas of the Gaussian to cover
:param accuracy: 'xhigh', 'high', 'med' or 'low'; sets the number of
bins in the dq_r and dq_phi directions (see NR and NPHI)
:param coords: coordinates [string], 'polar' or 'cartesian'
"""
## Accuracy: Higher stands for more sampling points in both directions
## of r and phi.
## number of bins in r axis for over-sampling
self.nr = NR[accuracy.lower()]
## number of bins in phi axis for over-sampling
self.nphi = NPHI[accuracy.lower()]
## maximum nsigmas
self.nsigma = nsigma
self.coords = coords
self._init_data(data, index)
def _init_data(self, data, index):
"""
Get qx_data, qy_data, dqx_data,dqy_data,
and calculate phi_data=arctan(qx_data/qy_data)
"""
# TODO: maybe don't need to hold copy of qx,qy,dqx,dqy,data,index
# just need q_calc and weights
self.data = data
self.index = index if index is not None else slice(None)
self.qx_data = data.qx_data[self.index]
self.qy_data = data.qy_data[self.index]
self.q_data = data.q_data[self.index]
dqx = getattr(data, 'dqx_data', None)
dqy = getattr(data, 'dqy_data', None)
if dqx is not None and dqy is not None:
# Here dqx and dqy mean dq_parr and dq_perp
self.dqx_data = dqx[self.index]
self.dqy_data = dqy[self.index]
## Remove singular points if exists
self.dqx_data[self.dqx_data < SIGMA_ZERO] = SIGMA_ZERO
self.dqy_data[self.dqy_data < SIGMA_ZERO] = SIGMA_ZERO
qx_calc, qy_calc, weights = self._calc_res()
self.q_calc = [qx_calc, qy_calc]
self.q_calc_weights = weights
else:
# No resolution information
self.dqx_data = self.dqy_data = None
self.q_calc = [self.qx_data, self.qy_data]
self.q_calc_weights = None
#self.phi_data = np.arctan(self.qx_data / self.qy_data)
def _calc_res(self):
"""
Oversample onto an nr x nphi grid of (dq_r, dq_phi) points, calculate
Gaussian weights, then find the smeared intensity
"""
nr, nphi = self.nr, self.nphi
# Total number of bins = # of bins in dq_r-direction
# times # of bins in dq_phi-direction
nbins = nr * nphi
# Size of each bin in the dq_r direction (polar coordinate of dqx and dqy)
bin_size = self.nsigma / nr
# data length in the range of self.index
nq = len(self.qx_data)
# Mean value of dq_r in each bin,
# starting from half the bin size
r = bin_size / 2.0 + np.arange(nr) * bin_size
# mean values of dq_phi at each bin
phi = np.arange(nphi)
dphi = phi * 2.0 * pi / nphi
dphi = dphi.repeat(nr)
## Transform to polar coordinates
# and set dphi at each data point; 1d array
dphi = dphi.repeat(nq)
q_phi = self.qy_data / self.qx_data
# Starting angle is different between polar
# and cartesian coordinates.
#if self.coords != 'polar':
# dphi += np.arctan( q_phi * self.dqx_data/ \
# self.dqy_data).repeat(nbins).reshape(nq,\
# nbins).transpose().flatten()
# The angle (phi) of the original q point
q_phi = np.arctan(q_phi).repeat(nbins)\
.reshape([nq, nbins]).transpose().flatten()
## Find the Gaussian weight for each dq bin: the weight depends only
# on the r-direction (the phi integration is not needed)
weight_res = (np.exp(-0.5 * (r - bin_size / 2.0)**2) -
np.exp(-0.5 * (r + bin_size / 2.0)**2))
# No need for normalization here: np.average in apply() normalizes the weights.
#weight_res /= np.sum(weight_res)
weight_res = weight_res.repeat(nphi).reshape(nr, nphi)
weight_res = weight_res.transpose().flatten()
## Set dr for all dq bins for averaging
dr = r.repeat(nphi).reshape(nr, nphi).transpose().flatten()
## Set dqr for all data points
dqx = np.outer(dr, self.dqx_data).flatten()
dqy = np.outer(dr, self.dqy_data).flatten()
qx = self.qx_data.repeat(nbins)\
.reshape(nq, nbins).transpose().flatten()
qy = self.qy_data.repeat(nbins)\
.reshape(nq, nbins).transpose().flatten()
# The polar needs rotation by -q_phi
if self.coords == 'polar':
q_r = sqrt(qx**2 + qy**2)
qx_res = ((dqx*cos(dphi) + q_r) * cos(-q_phi)
+ dqy*sin(dphi) * sin(-q_phi))
qy_res = (-(dqx*cos(dphi) + q_r) * sin(-q_phi)
+ dqy*sin(dphi) * cos(-q_phi))
else:
qx_res = qx + dqx*cos(dphi)
qy_res = qy + dqy*sin(dphi)
return qx_res, qy_res, weight_res
def apply(self, theory):
if self.q_calc_weights is not None:
# TODO: interpolate rather than recomputing all the different qx,qy
# Resolution needs to be applied
nq, nbins = len(self.qx_data), self.nr * self.nphi
## Reshape into 2d array to use np weighted averaging
theory = np.reshape(theory, (nbins, nq))
## Averaging with Gaussian weighting: normalization included.
value = np.average(theory, axis=0, weights=self.q_calc_weights)
## Return the smeared values in the range of self.index
return value
else:
return theory
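# Hypothetical usage sketch (not part of the original module): `data` is
# assumed to be a sasview-style Data2D with qx_data/qy_data/q_data and
# optional dqx_data/dqy_data, and `model` is an illustrative callable.
#
# res = Pinhole2D(data, accuracy='med')
# theory = model(res.q_calc) # evaluate on the oversampled (qx, qy) points
# smeared = res.apply(theory) # Gaussian-weighted average per data point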
class Slit2D(Resolution):
"""
Slit aperture with resolution function on an oriented sample.
*q* points at which the data is measured.
*qx_width* slit width in qx
*qy_width* slit height in qy; current implementation requires a fixed
qy_width for all q points.
*q_calc* is the list of q points to calculate, or None if this
should be estimated from the *q* and *qx_width*.
*accuracy* determines the number of *qy* points to compute for each *q*.
The values are stored in sasmodels.resolution2d.N_SLIT_PERP. The default
values are: %s
"""
__doc__ = __doc__%N_SLIT_PERP_DOC
def __init__(self, q, qx_width, qy_width=0., q_calc=None, accuracy='low'):
# Remember what q and width was used even though we won't need them
# after the weight matrix is constructed
self.q, self.qx_width, self.qy_width = q, qx_width, qy_width
# Allow independent resolution on each qx point even though it is not
# needed in practice. Set qy_width to the maximum qy width.
if np.isscalar(qx_width):
qx_width = np.ones(len(q))*qx_width
else:
qx_width = np.asarray(qx_width)
if not np.isscalar(qy_width):
qy_width = np.max(qy_width)
# Build grid of qx, qy points
if q_calc is not None:
qx_calc = np.sort(q_calc)
else:
qx_calc = resolution.pinhole_extend_q(q, qx_width, nsigma=3)
qy_min, qy_max = np.log10(np.min(q)), np.log10(qy_width)
qy_calc = np.logspace(qy_min, qy_max, N_SLIT_PERP[accuracy])
qy_calc = np.hstack((-qy_calc[::-1], 0, qy_calc))
self.q_calc = [v.flatten() for v in np.meshgrid(qx_calc, qy_calc)]
self.qx_calc, self.qy_calc = qx_calc, qy_calc
self.nx, self.ny = len(qx_calc), len(qy_calc)
self.dy = 2*qy_width/self.ny
# Build weight matrix for resolution integration
if np.any(qx_width > 0):
self.weights = resolution.pinhole_resolution(
qx_calc, q, np.maximum(qx_width, resolution.MINIMUM_RESOLUTION))
elif len(qx_calc) == len(q) and np.all(qx_calc == q):
self.weights = None
else:
raise ValueError("Slit2D fails with q_calc != q")
def apply(self, theory):
Iq = np.trapz(theory.reshape(self.ny, self.nx), axis=0, x=self.qy_calc)
if self.weights is not None:
Iq = resolution.apply_resolution_matrix(self.weights, Iq)
return Iq
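# Hypothetical usage sketch (names illustrative; `model` is a stand-in):
#
# q = np.logspace(-3, -1, 50)
# res = Slit2D(q, qx_width=0.002, qy_width=0.1, accuracy='low')
# theory = model(res.q_calc) # model evaluated on the (qx, qy) mesh
# Iq = res.apply(theory) # integrate over qy, then apply qx resolution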
|
SasView/sasmodels
|
sasmodels/resolution2d.py
|
Python
|
bsd-3-clause
| 9,437
|
[
"Gaussian"
] |
36eb954b9971fe7949e6cf800b98934847c7ff606017a97869bbabec01f5fddf
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2008-2009 Gary Burton
# Copyright (C) 2008 Robert Cheramy <robert@cheramy.net>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010 Nick Hall
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2012 Doug Blank <doug.blank@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"Export to GEDCOM"
#-------------------------------------------------------------------------
#
# Standard Python Modules
#
#-------------------------------------------------------------------------
import os
import time
import io
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
from gramps.gen.lib import (AttributeType, ChildRefType, Citation, Date,
EventRoleType, EventType, LdsOrd, NameType,
NoteType, Person, UrlType)
from gramps.gen.const import VERSION
import gramps.plugins.lib.libgedcom as libgedcom
from gramps.gen.errors import DatabaseError
from gramps.gui.plug.export import WriterOptionBox
from gramps.gen.updatecallback import UpdateCallback
from gramps.gen.utils.file import media_path_full
from gramps.gen.utils.place import conv_lat_lon
from gramps.gen.constfunc import cuni
#-------------------------------------------------------------------------
#
# GEDCOM tags representing attributes that may take a parameter, value or
# description on the same line as the tag
#
#-------------------------------------------------------------------------
NEEDS_PARAMETER = set(
["CAST", "DSCR", "EDUC", "IDNO", "NATI", "NCHI",
"NMR", "OCCU", "PROP", "RELI", "SSN", "TITL"])
LDS_ORD_NAME = {
LdsOrd.BAPTISM : 'BAPL',
LdsOrd.ENDOWMENT : 'ENDL',
LdsOrd.SEAL_TO_PARENTS : 'SLGC',
LdsOrd.SEAL_TO_SPOUSE : 'SLGS',
LdsOrd.CONFIRMATION : 'CONL',
}
LDS_STATUS = {
LdsOrd.STATUS_BIC : "BIC",
LdsOrd.STATUS_CANCELED : "CANCELED",
LdsOrd.STATUS_CHILD : "CHILD",
LdsOrd.STATUS_CLEARED : "CLEARED",
LdsOrd.STATUS_COMPLETED : "COMPLETED",
LdsOrd.STATUS_DNS : "DNS",
LdsOrd.STATUS_INFANT : "INFANT",
LdsOrd.STATUS_PRE_1970 : "PRE-1970",
LdsOrd.STATUS_QUALIFIED : "QUALIFIED",
LdsOrd.STATUS_DNS_CAN : "DNS/CAN",
LdsOrd.STATUS_STILLBORN : "STILLBORN",
LdsOrd.STATUS_SUBMITTED : "SUBMITTED" ,
LdsOrd.STATUS_UNCLEARED : "UNCLEARED",
}
LANGUAGES = {
'cs' : 'Czech', 'da' : 'Danish', 'nl' : 'Dutch',
'en' : 'English', 'eo' : 'Esperanto', 'fi' : 'Finnish',
'fr' : 'French', 'de' : 'German', 'hu' : 'Hungarian',
'it' : 'Italian', 'lt' : 'Lithuanian', 'lv' : 'Latvian',
'no' : 'Norwegian', 'pl' : 'Polish', 'pt' : 'Portuguese',
'ro' : 'Romanian', 'sk' : 'Slovak', 'es' : 'Spanish',
'sv' : 'Swedish', 'ru' : 'Russian',
}
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
MIME2GED = {
"image/bmp" : "bmp",
"image/gif" : "gif",
"image/jpeg" : "jpeg",
"image/x-pcx" : "pcx",
"image/tiff" : "tiff",
"audio/x-wav" : "wav"
}
QUALITY_MAP = {
Citation.CONF_VERY_HIGH : "3",
Citation.CONF_HIGH : "2",
Citation.CONF_LOW : "1",
Citation.CONF_VERY_LOW : "0",
}
#-------------------------------------------------------------------------
#
# sort_handles_by_id
#
#-------------------------------------------------------------------------
def sort_handles_by_id(handle_list, handle_to_object):
"""
Sort a list of handles by the Gramps ID.
The function that returns the object from the handle needs to be supplied
so that we get the right object.
"""
sorted_list = []
for handle in handle_list:
obj = handle_to_object(handle)
if obj:
data = (obj.get_gramps_id(), handle)
sorted_list.append(data)
sorted_list.sort()
return sorted_list
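# Hypothetical usage (IDs and handles illustrative):
#
# pairs = sort_handles_by_id(db.get_family_handles(),
# db.get_family_from_handle)
# -> [('F0001', <handle>), ('F0002', <handle>), ...] sorted by Gramps ID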
#-------------------------------------------------------------------------
#
# breakup
#
#-------------------------------------------------------------------------
def breakup(txt, limit):
"""
Break a line of text into a list of strings that conform to the
maximum length specified, breaking within words where necessary so
that no break lands next to a space.
"""
if limit < 1:
raise ValueError("breakup: unexpected limit: %r" % limit)
data = []
while len(txt) > limit:
# look for non-space pair to break between
# do not break within a UTF-8 byte sequence, i.e. first char > 127
idx = limit
while (idx>0 and (txt[idx-1].isspace() or txt[idx].isspace()
or ord(txt[idx-1]) > 127)):
idx -= 1
if idx == 0:
#no words to break on, just break at limit anyway
idx = limit
data.append(txt[:idx])
txt = txt[idx:]
if len(txt) > 0:
data.append(txt)
return data
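# Worked example: with limit=5 the split backs up to avoid cutting next to a
# space, landing inside the word instead:
#
# breakup("hello world", 5) # -> ['hell', 'o wor', 'ld']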
#-------------------------------------------------------------------------
#
# event_has_subordinate_data
# may want to compare description w/ auto-generated one, and
# if so, treat it the same as if it were empty for this purpose
#
#-------------------------------------------------------------------------
def event_has_subordinate_data(event, event_ref):
if event and event_ref:
return (event.get_description().strip() or
not event.get_date_object().is_empty() or
event.get_place_handle() or
event.get_attribute_list() or
event_ref.get_attribute_list() or
event.get_note_list() or
event.get_citation_list() or
event.get_media_list())
else:
return False
#-------------------------------------------------------------------------
#
# GedcomWriter class
#
#-------------------------------------------------------------------------
class GedcomWriter(UpdateCallback):
"""
The GEDCOM writer creates a GEDCOM file that contains the exported
information from the database. It derives from UpdateCallback
so that it can provide visual feedback via a progress bar if needed.
"""
def __init__(self, database, user, option_box=None):
UpdateCallback.__init__(self, user.callback)
self.total = 100
self.dbase = database
self.dirname = None
self.gedcom_file = None
# The number of different stages other than any of the optional filters
# which the write_gedcom_file method will call.
self.progress_cnt = 5
self.setup(option_box)
def setup(self, option_box):
"""
If the option_box is present (GUI interface), then we check the
"private", "restrict", and "cfilter" arguments to see if we need
to apply proxy databases.
"""
if option_box:
option_box.parse_options()
self.dbase = option_box.get_filtered_database(self.dbase, self)
def write_gedcom_file(self, filename):
"""
Write the actual GEDCOM file to the specified filename.
"""
self.dirname = os.path.dirname(filename)
self.gedcom_file = io.open(filename, "w", encoding='utf-8')
self._header(filename)
self._submitter()
self._individuals()
self._families()
self._sources()
self._repos()
self._notes()
self._writeln(0, "TRLR")
self.gedcom_file.close()
return True
def _writeln(self, level, token, textlines="", limit=72):
"""
Write a line of text to the output file in the form of:
LEVEL TOKEN text
If the line contains newlines, it is broken into multiple lines using
the CONT token. If any line is greater than the limit, it will be
broken into multiple lines using CONC.
"""
assert(token)
if textlines:
# break the line into multiple lines if a newline is found
textlines = textlines.replace('\n\r', '\n')
textlines = textlines.replace('\r', '\n')
textlist = textlines.split('\n')
token_level = level
for text in textlist:
# make it unicode so that breakup below does the right thing.
text = cuni(text)
if limit:
prefix = cuni("\n%d CONC " % (level + 1))
txt = prefix.join(breakup(text, limit))
else:
txt = text
self.gedcom_file.write(cuni("%d %s %s\n" % (token_level, token, txt)))
token_level = level + 1
token = "CONT"
else:
self.gedcom_file.write(cuni("%d %s\n" % (level, token)))
def _header(self, filename):
"""
Write the GEDCOM header.
HEADER:=
n HEAD {1:1}
+1 SOUR <APPROVED_SYSTEM_ID> {1:1}
+2 VERS <VERSION_NUMBER> {0:1}
+2 NAME <NAME_OF_PRODUCT> {0:1}
+2 CORP <NAME_OF_BUSINESS> {0:1} # Not used
+3 <<ADDRESS_STRUCTURE>> {0:1} # Not used
+2 DATA <NAME_OF_SOURCE_DATA> {0:1} # Not used
+3 DATE <PUBLICATION_DATE> {0:1} # Not used
+3 COPR <COPYRIGHT_SOURCE_DATA> {0:1} # Not used
+1 DEST <RECEIVING_SYSTEM_NAME> {0:1*} # Not used
+1 DATE <TRANSMISSION_DATE> {0:1}
+2 TIME <TIME_VALUE> {0:1}
+1 SUBM @XREF:SUBM@ {1:1}
+1 SUBN @XREF:SUBN@ {0:1}
+1 FILE <FILE_NAME> {0:1}
+1 COPR <COPYRIGHT_GEDCOM_FILE> {0:1}
+1 GEDC {1:1}
+2 VERS <VERSION_NUMBER> {1:1}
+2 FORM <GEDCOM_FORM> {1:1}
+1 CHAR <CHARACTER_SET> {1:1}
+2 VERS <VERSION_NUMBER> {0:1}
+1 LANG <LANGUAGE_OF_TEXT> {0:1}
+1 PLAC {0:1}
+2 FORM <PLACE_HIERARCHY> {1:1}
+1 NOTE <GEDCOM_CONTENT_DESCRIPTION> {0:1}
+2 [CONT|CONC] <GEDCOM_CONTENT_DESCRIPTION> {0:M}
"""
local_time = time.localtime(time.time())
(year, mon, day, hour, minutes, sec) = local_time[0:6]
date_str = "%d %s %d" % (day, libgedcom.MONTH[mon], year)
time_str = "%02d:%02d:%02d" % (hour, minutes, sec)
rname = self.dbase.get_researcher().get_name()
self._writeln(0, "HEAD")
self._writeln(1, "SOUR", "Gramps")
self._writeln(2, "VERS", VERSION)
self._writeln(2, "NAME", "Gramps")
self._writeln(1, "DATE", date_str)
self._writeln(2, "TIME", time_str)
self._writeln(1, "SUBM", "@SUBM@")
self._writeln(1, "FILE", filename, limit=255)
self._writeln(1, "COPR", 'Copyright (c) %d %s.' % (year, rname))
self._writeln(1, "GEDC")
self._writeln(2, "VERS", "5.5")
self._writeln(2, "FORM", 'LINEAGE-LINKED')
self._writeln(1, "CHAR", "UTF-8")
# write the language string if the current LANG variable
# matches something we know about.
lang = os.getenv('LANG')
if lang and len(lang) >= 2:
lang_code = LANGUAGES.get(lang[0:2])
if lang_code:
self._writeln(1, 'LANG', lang_code)
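# Illustrative prologue emitted by _header (date, version and copyright are
# examples only):
#
# 0 HEAD
# 1 SOUR Gramps
# 2 VERS 4.1.0
# 2 NAME Gramps
# 1 DATE 1 JAN 2016
# 2 TIME 12:00:00
# 1 SUBM @SUBM@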
def _submitter(self):
"""
n @<XREF:SUBM>@ SUBM {1:1}
+1 NAME <SUBMITTER_NAME> {1:1}
+1 <<ADDRESS_STRUCTURE>> {0:1}
+1 <<MULTIMEDIA_LINK>> {0:M} # not used
+1 LANG <LANGUAGE_PREFERENCE> {0:3} # not used
+1 RFN <SUBMITTER_REGISTERED_RFN> {0:1} # not used
+1 RIN <AUTOMATED_RECORD_ID> {0:1} # not used
+1 <<CHANGE_DATE>> {0:1} # not used
"""
owner = self.dbase.get_researcher()
name = owner.get_name()
addr = owner.get_address()
adr2 = owner.get_locality()
city = owner.get_city()
state = owner.get_state()
ctry = owner.get_country()
post = owner.get_postal_code()
phon = owner.get_phone()
mail = owner.get_email()
if not name :
name = cuni('Not Provided')
if not addr :
addr = cuni('Not Provided')
self._writeln(0, "@SUBM@", "SUBM")
self._writeln(1, "NAME", name)
self._writeln(1, "ADDR", addr)
if city and state and post:
self._writeln(2, "CONT", "%s, %s %s" % (city, state, post))
else:
self._writeln(2, "CONT", cuni("Not Provided"))
if addr:
self._writeln(2, "ADR1", addr)
if adr2:
self._writeln(2, "ADR2", adr2)
if city:
self._writeln(2, "CITY", city)
if state:
self._writeln(2, "STAE", state)
if post:
self._writeln(2, "POST", post)
if ctry:
self._writeln(2, "CTRY", ctry)
if phon:
self._writeln(1, "PHON", phon)
if mail:
self._writeln(1, "EMAIL", mail)
def _individuals(self):
"""
Write the individual people to the gedcom file.
Since people like to have the list sorted by ID value, we need to go
through a sorting step. We need to reset the progress bar, otherwise,
people will be confused when the progress bar is idle.
"""
self.reset(_("Writing individuals"))
self.progress_cnt += 1
self.update(self.progress_cnt)
phandles = self.dbase.iter_person_handles()
sorted_list = []
for handle in phandles:
person = self.dbase.get_person_from_handle(handle)
if person:
data = (person.get_gramps_id(), handle)
sorted_list.append(data)
sorted_list.sort()
for data in sorted_list:
self._person(self.dbase.get_person_from_handle(data[1]))
def _person(self, person):
"""
Write out a single person.
n @XREF:INDI@ INDI {1:1}
+1 RESN <RESTRICTION_NOTICE> {0:1} # not used
+1 <<PERSONAL_NAME_STRUCTURE>> {0:M}
+1 SEX <SEX_VALUE> {0:1}
+1 <<INDIVIDUAL_EVENT_STRUCTURE>> {0:M}
+1 <<INDIVIDUAL_ATTRIBUTE_STRUCTURE>> {0:M}
+1 <<LDS_INDIVIDUAL_ORDINANCE>> {0:M}
+1 <<CHILD_TO_FAMILY_LINK>> {0:M}
+1 <<SPOUSE_TO_FAMILY_LINK>> {0:M}
+1 SUBM @<XREF:SUBM>@ {0:M}
+1 <<ASSOCIATION_STRUCTURE>> {0:M}
+1 ALIA @<XREF:INDI>@ {0:M}
+1 ANCI @<XREF:SUBM>@ {0:M}
+1 DESI @<XREF:SUBM>@ {0:M}
+1 <<SOURCE_CITATION>> {0:M}
+1 <<MULTIMEDIA_LINK>> {0:M}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 RFN <PERMANENT_RECORD_FILE_NUMBER> {0:1}
+1 AFN <ANCESTRAL_FILE_NUMBER> {0:1}
+1 REFN <USER_REFERENCE_NUMBER> {0:M}
+2 TYPE <USER_REFERENCE_TYPE> {0:1}
+1 RIN <AUTOMATED_RECORD_ID> {0:1}
+1 <<CHANGE_DATE>> {0:1}
"""
if person is None:
return
self._writeln(0, "@%s@" % person.get_gramps_id(), "INDI")
self._names(person)
self._gender(person)
self._person_event_ref('BIRT', person.get_birth_ref())
self._person_event_ref('DEAT', person.get_death_ref())
self._remaining_events(person)
self._attributes(person)
self._lds_ords(person, 1)
self._child_families(person)
self._parent_families(person)
self._assoc(person, 1)
self._person_sources(person)
self._addresses(person)
self._photos(person.get_media_list(), 1)
self._url_list(person, 1)
self._note_references(person.get_note_list(), 1)
self._change(person.get_change_time(), 1)
def _assoc(self, person, level):
"""
n ASSO @<XREF:INDI>@ {0:M}
+1 RELA <RELATION_IS_DESCRIPTOR> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 <<SOURCE_CITATION>> {0:M}
"""
for ref in person.get_person_ref_list():
person = self.dbase.get_person_from_handle(ref.ref)
if person:
self._writeln(level, "ASSO", "@%s@" % person.get_gramps_id())
self._writeln(level+1, "RELA", ref.get_relation())
self._note_references(ref.get_note_list(), level+1)
self._source_references(ref.get_citation_list(), level+1)
def _note_references(self, notelist, level):
"""
Write out the list of note handles to the current level.
We use the Gramps ID as the XREF for the GEDCOM file.
"""
for note_handle in notelist:
note = self.dbase.get_note_from_handle(note_handle)
if note:
self._writeln(level, 'NOTE', '@%s@' % note.get_gramps_id())
def _names(self, person):
"""
Write the names associated with the person to the current level.
Since nicknames in version < 3.3 are separate from the name structure,
we search the attribute list to see if we can find a nickname.
Because we do not know the mappings, we just take the first nickname
we find, and add it to the primary name.
If a nickname is present in the name structure, it has precedence
"""
nicknames = [ attr.get_value() for attr in person.get_attribute_list()
if int(attr.get_type()) == AttributeType.NICKNAME ]
if len(nicknames) > 0:
nickname = nicknames[0]
else:
nickname = ""
self._person_name(person.get_primary_name(), nickname)
for name in person.get_alternate_names():
self._person_name(name, "")
def _gender(self, person):
"""
Write out the gender of the person to the file.
The only valid GEDCOM values are M (male) and F (female), so if the
gender is unknown we simply output nothing.
"""
if person.get_gender() == Person.MALE:
self._writeln(1, "SEX", "M")
elif person.get_gender() == Person.FEMALE:
self._writeln(1, "SEX", "F")
def _lds_ords(self, obj, level):
"""
Simply loop through the list of LDS ordinances, and call the function
that writes the LDS ordinance structure.
"""
for lds_ord in obj.get_lds_ord_list():
self.write_ord(lds_ord, level)
def _remaining_events(self, person):
"""
Output all events associated with the person that are not BIRTH or
DEATH events.
Because all we have are event references, we have to
extract the real event to discover the event type.
"""
for event_ref in person.get_event_ref_list():
event = self.dbase.get_event_from_handle(event_ref.ref)
if event is None: continue
self._process_person_event(event, event_ref)
self._dump_event_stats(event, event_ref)
self._adoption_records(person)
def _process_person_event(self, event, event_ref):
"""
Process a person event, which is not a BIRTH or DEATH event.
"""
etype = int(event.get_type())
# if the event is a birth or death, skip it.
if etype in (EventType.BIRTH, EventType.DEATH):
return
role = int(event_ref.get_role())
# if the event role is not primary, skip the event.
if role != EventRoleType.PRIMARY:
return
val = libgedcom.PERSONALCONSTANTEVENTS.get(etype, "").strip()
if val and val.strip():
if val in NEEDS_PARAMETER:
if event.get_description().strip():
self._writeln(1, val, event.get_description())
else:
self._writeln(1, val)
else:
if event_has_subordinate_data(event, event_ref):
self._writeln(1, val)
else:
self._writeln(1, val, 'Y')
if event.get_description().strip():
self._writeln(2, 'TYPE', event.get_description())
else:
self._writeln(1, 'EVEN')
if val.strip():
self._writeln(2, 'TYPE', val)
else:
self._writeln(2, 'TYPE', str(event.get_type()))
descr = event.get_description()
if descr:
self._writeln(2, 'NOTE', "Description: " + descr)
def _adoption_records(self, person):
"""
Write Adoption events for each child that has been adopted.
n ADOP
+1 <<INDIVIDUAL_EVENT_DETAIL>>
+1 FAMC @<XREF:FAM>@
+2 ADOP <ADOPTED_BY_WHICH_PARENT>
"""
adoptions = []
for family in [ self.dbase.get_family_from_handle(fh)
for fh in person.get_parent_family_handle_list() ]:
if family is None:
continue
for child_ref in [ ref for ref in family.get_child_ref_list()
if ref.ref == person.handle ]:
if child_ref.mrel == ChildRefType.ADOPTED \
or child_ref.frel == ChildRefType.ADOPTED:
adoptions.append((family, child_ref.frel, child_ref.mrel))
for (fam, frel, mrel) in adoptions:
self._writeln(1, 'ADOP', 'Y')
self._writeln(2, 'FAMC', '@%s@' % fam.get_gramps_id())
if mrel == frel:
self._writeln(3, 'ADOP', 'BOTH')
elif mrel == ChildRefType.ADOPTED:
self._writeln(3, 'ADOP', 'WIFE')
else:
self._writeln(3, 'ADOP', 'HUSB')
def _attributes(self, person):
"""
Write out the attributes to the GEDCOM file.
Since we have already looked at nicknames when we generated the names,
we filter them out here.
We use the GEDCOM 5.5.1 FACT command to write out attributes not
built in to GEDCOM.
"""
# filter out the nicknames
attr_list = [ attr for attr in person.get_attribute_list()
if attr.get_type() != AttributeType.NICKNAME ]
for attr in attr_list:
attr_type = int(attr.get_type())
name = libgedcom.PERSONALCONSTANTATTRIBUTES.get(attr_type)
key = str(attr.get_type())
value = attr.get_value().strip().replace('\r', ' ')
if key in ("AFN", "RFN", "REFN", "_UID"):
self._writeln(1, key, value)
continue
if key == "RESN":
self._writeln(1, 'RESN')
continue
if name and name.strip():
self._writeln(1, name, value)
elif value:
self._writeln(1, 'FACT', value)
self._writeln(2, 'TYPE', key)
else:
continue
self._note_references(attr.get_note_list(), 2)
self._source_references(attr.get_citation_list(), 2)
def _source_references(self, citation_list, level):
"""
Loop through the list of citation handles, writing the information
to the file.
"""
for citation_handle in citation_list:
self._source_ref_record(level, citation_handle)
def _addresses(self, person):
"""
Write out the addresses associated with the person as RESI events.
"""
for addr in person.get_address_list():
self._writeln(1, 'RESI')
self._date(2, addr.get_date_object())
self._writeln(2, "ADDR", addr.get_street())
if addr.get_street():
self._writeln(3, 'ADR1', addr.get_street())
if addr.get_locality():
self._writeln(3, 'ADR2', addr.get_locality())
if addr.get_city():
self._writeln(3, 'CITY', addr.get_city())
if addr.get_state():
self._writeln(3, 'STAE', addr.get_state())
if addr.get_postal_code():
self._writeln(3, 'POST', addr.get_postal_code())
if addr.get_country():
self._writeln(3, 'CTRY', addr.get_country())
if addr.get_phone():
self._writeln(2, 'PHON', addr.get_phone())
self._note_references(addr.get_note_list(), 2)
self._source_references(addr.get_citation_list(), 2)
def _photos(self, media_list, level):
"""
Loop through the list of media objects, writing the information
to the file.
"""
for photo in media_list:
self._photo(photo, level)
def _child_families(self, person):
"""
Write the Gramps ID as the XREF for each family in which the person
is listed as a child.
"""
# get the list of familes from the handle list
family_list = [ self.dbase.get_family_from_handle(hndl)
for hndl in person.get_parent_family_handle_list() ]
for family in family_list:
if family:
self._writeln(1, 'FAMC', '@%s@' % family.get_gramps_id())
def _parent_families(self, person):
"""
Write the Gramps ID as the XREF for each family in which the person
is listed as a parent.
"""
# get the list of familes from the handle list
family_list = [ self.dbase.get_family_from_handle(hndl)
for hndl in person.get_family_handle_list() ]
for family in family_list:
if family:
self._writeln(1, 'FAMS', '@%s@' % family.get_gramps_id())
def _person_sources(self, person):
"""
Loop through the list of citations, writing the information
to the file.
"""
for citation_handle in person.get_citation_list():
self._source_ref_record(1, citation_handle)
def _url_list(self, obj, level):
"""
n OBJE {1:1}
+1 FORM <MULTIMEDIA_FORMAT> {1:1}
+1 TITL <DESCRIPTIVE_TITLE> {0:1}
+1 FILE <MULTIMEDIA_FILE_REFERENCE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
"""
for url in obj.get_url_list():
self._writeln(level, 'OBJE')
self._writeln(level+1, 'FORM', 'URL')
if url.get_description():
self._writeln(level+1, 'TITL', url.get_description())
if url.get_path():
self._writeln(level+1, 'FILE', url.get_path(), limit=255)
def _families(self):
"""
Write out the list of families, sorting by Gramps ID.
"""
self.reset(_("Writing families"))
self.progress_cnt += 1
self.update(self.progress_cnt)
# generate a list of (GRAMPS_ID, HANDLE) pairs. This list
# can then be sorted by the sort routine, which will use the
# first value of the tuple as the sort key.
sorted_list = sort_handles_by_id(self.dbase.get_family_handles(),
self.dbase.get_family_from_handle)
        # loop through the sorted list, pulling out the handle. This list
# has already been sorted by GRAMPS_ID
for family_handle in [hndl[1] for hndl in sorted_list]:
self._family(self.dbase.get_family_from_handle(family_handle))
def _family(self, family):
"""
n @<XREF:FAM>@ FAM {1:1}
        +1 RESN <RESTRICTION_NOTICE> {0:1}
+1 <<FAMILY_EVENT_STRUCTURE>> {0:M}
+1 HUSB @<XREF:INDI>@ {0:1}
+1 WIFE @<XREF:INDI>@ {0:1}
+1 CHIL @<XREF:INDI>@ {0:M}
+1 NCHI <COUNT_OF_CHILDREN> {0:1}
+1 SUBM @<XREF:SUBM>@ {0:M}
+1 <<LDS_SPOUSE_SEALING>> {0:M}
+1 REFN <USER_REFERENCE_NUMBER> {0:M}
"""
if family is None:
return
gramps_id = family.get_gramps_id()
self._writeln(0, '@%s@' % gramps_id, 'FAM' )
self._family_reference('HUSB', family.get_father_handle())
self._family_reference('WIFE', family.get_mother_handle())
self._lds_ords(family, 1)
self._family_events(family)
self._family_attributes(family.get_attribute_list(), 1)
self._family_child_list(family.get_child_ref_list())
self._source_references(family.get_citation_list(), 1)
self._photos(family.get_media_list(), 1)
self._note_references(family.get_note_list(), 1)
self._change(family.get_change_time(), 1)
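# Rough shape of a family record written by _family (all IDs and the
# event date invented):
#   0 @F0001@ FAM
#   1 HUSB @I0001@
#   1 WIFE @I0002@
#   1 MARR
#   2 DATE 5 MAY 1870
#   1 CHIL @I0003@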
def _family_child_list(self, child_ref_list):
"""
Write the child XREF values to the GEDCOM file.
"""
child_list = [
self.dbase.get_person_from_handle(cref.ref).get_gramps_id()
for cref in child_ref_list]
for gid in child_list:
if gid is None: continue
self._writeln(1, 'CHIL', '@%s@' % gid)
def _family_reference(self, token, person_handle):
"""
Write the family reference to the file.
This is either 'WIFE' or 'HUSB'. As usual, we use the Gramps ID as the
XREF value.
"""
if person_handle:
person = self.dbase.get_person_from_handle(person_handle)
if person:
self._writeln(1, token, '@%s@' % person.get_gramps_id())
def _family_events(self, family):
"""
Output the events associated with the family.
Because all we have are event references, we have to extract the real
event to discover the event type.
"""
for event_ref in family.get_event_ref_list():
event = self.dbase.get_event_from_handle(event_ref.ref)
if event is None: continue
self._process_family_event(event, event_ref)
self._dump_event_stats(event, event_ref)
def _process_family_event(self, event, event_ref):
"""
Process a single family event.
"""
etype = int(event.get_type())
val = libgedcom.FAMILYCONSTANTEVENTS.get(etype)
if val:
if event_has_subordinate_data(event, event_ref):
self._writeln(1, val)
else:
self._writeln(1, val, 'Y')
if event.get_type() == EventType.MARRIAGE:
self._family_event_attrs(event.get_attribute_list(), 2)
if event.get_description().strip() != "":
self._writeln(2, 'TYPE', event.get_description())
else:
self._writeln(1, 'EVEN')
the_type = str(event.get_type())
if the_type:
self._writeln(2, 'TYPE', the_type)
descr = event.get_description()
if descr:
self._writeln(2, 'NOTE', "Description: " + descr)
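# Two hedged examples of the branch above (text invented): a type with a
# GEDCOM equivalent and subordinate data emits a bare tag such as
#   1 MARR
# while a type with no GEDCOM equivalent degrades to the generic form
#   1 EVEN
#   2 TYPE Commitment ceremony
#   2 NOTE Description: ...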
def _family_event_attrs(self, attr_list, level):
"""
Write the attributes associated with the family event.
The only ones we really care about are FATHER_AGE and MOTHER_AGE which
we translate to WIFE/HUSB AGE attributes.
"""
for attr in attr_list:
if attr.get_type() == AttributeType.FATHER_AGE:
self._writeln(level, 'HUSB')
self._writeln(level+1, 'AGE', attr.get_value())
elif attr.get_type() == AttributeType.MOTHER_AGE:
self._writeln(level, 'WIFE')
self._writeln(level+1, 'AGE', attr.get_value())
def _family_attributes(self, attr_list, level):
"""
Write out the attributes associated with a family to the GEDCOM file.
Since we have already looked at nicknames when we generated the names,
we filter them out here.
We use the GEDCOM 5.5.1 FACT command to write out attributes not
built in to GEDCOM.
"""
for attr in attr_list:
            attr_type = int(attr.get_type())
            name = libgedcom.FAMILYCONSTANTATTRIBUTES.get(attr_type)
            # attr_type is an int, so comparing it against the tag strings
            # below could never match; compare the string form of the type
            # instead, as _attributes does for persons
            key = str(attr.get_type())
            value = attr.get_value().replace('\r', ' ')
            if key in ("AFN", "RFN", "REFN", "_UID"):
                self._writeln(1, key, value)
                continue
            if name and name.strip():
                self._writeln(1, name, value)
                continue
            self._writeln(1, 'FACT', value)
            self._writeln(2, 'TYPE', key)
self._note_references(attr.get_note_list(), level+1)
self._source_references(attr.get_citation_list(),
level+1)
def _sources(self):
"""
Write out the list of sources, sorting by Gramps ID.
"""
self.reset(_("Writing sources"))
self.progress_cnt += 1
self.update(self.progress_cnt)
sorted_list = sort_handles_by_id(self.dbase.get_source_handles(),
self.dbase.get_source_from_handle)
for (source_id, handle) in sorted_list:
source = self.dbase.get_source_from_handle(handle)
if source is None: continue
self._writeln(0, '@%s@' % source_id, 'SOUR')
if source.get_title():
self._writeln(1, 'TITL', source.get_title())
if source.get_author():
self._writeln(1, "AUTH", source.get_author())
if source.get_publication_info():
self._writeln(1, "PUBL", source.get_publication_info())
if source.get_abbreviation():
self._writeln(1, 'ABBR', source.get_abbreviation())
self._photos(source.get_media_list(), 1)
            # GEDCOM allows each source to cite only one repository,
            # so stop after the first reporef
            for reporef in source.get_reporef_list():
                self._reporef(reporef, 1)
                break
self._note_references(source.get_note_list(), 1)
self._change(source.get_change_time(), 1)
def _notes(self):
"""
Write out the list of notes, sorting by Gramps ID.
"""
self.reset(_("Writing notes"))
self.progress_cnt += 1
self.update(self.progress_cnt)
sorted_list = sort_handles_by_id(self.dbase.get_note_handles(),
self.dbase.get_note_from_handle)
for note_handle in [hndl[1] for hndl in sorted_list]:
note = self.dbase.get_note_from_handle(note_handle)
if note is None: continue
self._note_record(note)
def _note_record(self, note):
"""
n @<XREF:NOTE>@ NOTE <SUBMITTER_TEXT> {1:1}
+1 [ CONC | CONT] <SUBMITTER_TEXT> {0:M}
+1 <<SOURCE_CITATION>> {0:M}
+1 REFN <USER_REFERENCE_NUMBER> {0:M}
+2 TYPE <USER_REFERENCE_TYPE> {0:1}
+1 RIN <AUTOMATED_RECORD_ID> {0:1}
+1 <<CHANGE_DATE>> {0:1}
"""
if note:
self._writeln(0, '@%s@' % note.get_gramps_id(), 'NOTE ' + note.get())
def _repos(self):
"""
Write out the list of repositories, sorting by Gramps ID.
REPOSITORY_RECORD:=
n @<XREF:REPO>@ REPO {1:1}
+1 NAME <NAME_OF_REPOSITORY> {1:1}
+1 <<ADDRESS_STRUCTURE>> {0:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 REFN <USER_REFERENCE_NUMBER> {0:M}
+2 TYPE <USER_REFERENCE_TYPE> {0:1}
+1 RIN <AUTOMATED_RECORD_ID> {0:1}
+1 <<CHANGE_DATE>> {0:1}
"""
self.reset(_("Writing repositories"))
self.progress_cnt += 1
self.update(self.progress_cnt)
sorted_list = sort_handles_by_id(self.dbase.get_repository_handles(),
self.dbase.get_repository_from_handle)
# GEDCOM only allows for a single repository per source
for (repo_id, handle) in sorted_list:
repo = self.dbase.get_repository_from_handle(handle)
if repo is None: continue
self._writeln(0, '@%s@' % repo_id, 'REPO' )
if repo.get_name():
self._writeln(1, 'NAME', repo.get_name())
for addr in repo.get_address_list():
self._writeln(1, "ADDR", addr.get_street())
if addr.get_street():
self._writeln(2, 'ADR1', addr.get_street())
if addr.get_locality():
self._writeln(2, 'ADR2', addr.get_locality())
if addr.get_city():
self._writeln(2, 'CITY', addr.get_city())
if addr.get_state():
self._writeln(2, 'STAE', addr.get_state())
if addr.get_postal_code():
self._writeln(2, 'POST', addr.get_postal_code())
if addr.get_country():
self._writeln(2, 'CTRY', addr.get_country())
if addr.get_phone():
self._writeln(1, 'PHON', addr.get_phone())
for url in repo.get_url_list():
if int(url.get_type()) == UrlType.EMAIL:
self._writeln(1, 'EMAIL', url.get_path())
elif int(url.get_type()) == UrlType.WEB_HOME:
self._writeln(1, 'WWW', url.get_path())
self._note_references(repo.get_note_list(), 1)
def _reporef(self, reporef, level):
"""
n REPO [ @XREF:REPO@ | <NULL>] {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 CALN <SOURCE_CALL_NUMBER> {0:M}
+2 MEDI <SOURCE_MEDIA_TYPE> {0:1}
"""
if reporef.ref is None:
return
repo = self.dbase.get_repository_from_handle(reporef.ref)
if repo is None:
return
repo_id = repo.get_gramps_id()
self._writeln(level, 'REPO', '@%s@' % repo_id )
self._note_references(reporef.get_note_list(), level+1)
if reporef.get_call_number():
self._writeln(level+1, 'CALN', reporef.get_call_number() )
if reporef.get_media_type():
self._writeln(level+2, 'MEDI', str(reporef.get_media_type()))
def _person_event_ref(self, key, event_ref):
"""
Write out the BIRTH and DEATH events for the person.
"""
if event_ref:
event = self.dbase.get_event_from_handle(event_ref.ref)
if event_has_subordinate_data(event, event_ref):
self._writeln(1, key)
else:
self._writeln(1, key, 'Y')
if event.get_description().strip() != "":
self._writeln(2, 'TYPE', event.get_description())
self._dump_event_stats(event, event_ref)
def _change(self, timeval, level):
"""
CHANGE_DATE:=
n CHAN {1:1}
+1 DATE <CHANGE_DATE> {1:1}
+2 TIME <TIME_VALUE> {0:1}
+1 <<NOTE_STRUCTURE>> # not used
"""
self._writeln(level, 'CHAN')
time_val = time.localtime(timeval)
self._writeln(level+1, 'DATE', '%d %s %d' % (
time_val[2], libgedcom.MONTH[time_val[1]], time_val[0]))
self._writeln(level+2, 'TIME', '%02d:%02d:%02d' % (
time_val[3], time_val[4], time_val[5]))
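# Example output for a change stamp falling on 22 MAR 2016 at 14:30:05
# local time (values invented):
#   1 CHAN
#   2 DATE 22 MAR 2016
#   3 TIME 14:30:05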
def _dump_event_stats(self, event, event_ref):
"""
Write the event details for the event, using the event and event
reference information.
GEDCOM does not make a distinction between the two.
"""
dateobj = event.get_date_object()
self._date(2, dateobj)
if self._datewritten:
# write out TIME if present
times = [ attr.get_value() for attr in event.get_attribute_list()
if int(attr.get_type()) == AttributeType.TIME ]
# Not legal, but inserted by PhpGedView
            if len(times) > 0:
                # write the first value directly; binding it to a local
                # named ``time`` would shadow the time module used
                # elsewhere in this module
                self._writeln(3, 'TIME', times[0])
place = None
if event.get_place_handle():
place = self.dbase.get_place_from_handle(event.get_place_handle())
self._place(place, 2)
for attr in event.get_attribute_list():
attr_type = attr.get_type()
if attr_type == AttributeType.CAUSE:
self._writeln(2, 'CAUS', attr.get_value())
elif attr_type == AttributeType.AGENCY:
self._writeln(2, 'AGNC', attr.get_value())
for attr in event_ref.get_attribute_list():
attr_type = attr.get_type()
if attr_type == AttributeType.AGE:
self._writeln(2, 'AGE', attr.get_value())
elif attr_type == AttributeType.FATHER_AGE:
self._writeln(2, 'HUSB')
self._writeln(3, 'AGE', attr.get_value())
elif attr_type == AttributeType.MOTHER_AGE:
self._writeln(2, 'WIFE')
self._writeln(3, 'AGE', attr.get_value())
self._note_references(event.get_note_list(), 2)
self._source_references(event.get_citation_list(), 2)
self._photos(event.get_media_list(), 2)
if place:
self._photos(place.get_media_list(), 2)
def write_ord(self, lds_ord, index):
"""
LDS_INDIVIDUAL_ORDINANCE:=
[
n [ BAPL | CONL ] {1:1}
+1 DATE <DATE_LDS_ORD> {0:1}
+1 TEMP <TEMPLE_CODE> {0:1}
+1 PLAC <PLACE_LIVING_ORDINANCE> {0:1}
+1 STAT <LDS_BAPTISM_DATE_STATUS> {0:1}
+2 DATE <CHANGE_DATE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
          +1 <<SOURCE_CITATION>> {0:M}
|
n ENDL {1:1}
+1 DATE <DATE_LDS_ORD> {0:1}
+1 TEMP <TEMPLE_CODE> {0:1}
+1 PLAC <PLACE_LIVING_ORDINANCE> {0:1}
+1 STAT <LDS_ENDOWMENT_DATE_STATUS> {0:1}
+2 DATE <CHANGE_DATE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 <<SOURCE_CITATION>> {0:M}
|
n SLGC {1:1}
+1 DATE <DATE_LDS_ORD> {0:1}
+1 TEMP <TEMPLE_CODE> {0:1}
+1 PLAC <PLACE_LIVING_ORDINANCE> {0:1}
+1 FAMC @<XREF:FAM>@ {1:1}
+1 STAT <LDS_CHILD_SEALING_DATE_STATUS> {0:1}
+2 DATE <CHANGE_DATE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 <<SOURCE_CITATION>> {0:M}
]
"""
self._writeln(index, LDS_ORD_NAME[lds_ord.get_type()])
self._date(index + 1, lds_ord.get_date_object())
if lds_ord.get_family_handle():
family_handle = lds_ord.get_family_handle()
family = self.dbase.get_family_from_handle(family_handle)
if family:
self._writeln(index+1, 'FAMC', '@%s@' % family.get_gramps_id())
if lds_ord.get_temple():
self._writeln(index+1, 'TEMP', lds_ord.get_temple())
if lds_ord.get_place_handle():
self._place(
self.dbase.get_place_from_handle(lds_ord.get_place_handle()), 2)
if lds_ord.get_status() != LdsOrd.STATUS_NONE:
self._writeln(2, 'STAT', LDS_STATUS[lds_ord.get_status()])
self._note_references(lds_ord.get_note_list(), index+1)
self._source_references(lds_ord.get_citation_list(), index+1)
def _date(self, level, date):
"""
Write the 'DATE' GEDCOM token, along with the date in GEDCOM's
expected format.
"""
self._datewritten = True
start = date.get_start_date()
if start != Date.EMPTY:
cal = date.get_calendar()
mod = date.get_modifier()
quality = date.get_quality()
if mod == Date.MOD_SPAN:
val = "FROM %s TO %s" % (
libgedcom.make_gedcom_date(start, cal, mod, quality),
libgedcom.make_gedcom_date(date.get_stop_date(),
cal, mod, quality))
elif mod == Date.MOD_RANGE:
val = "BET %s AND %s" % (
libgedcom.make_gedcom_date(start, cal, mod, quality),
libgedcom.make_gedcom_date(date.get_stop_date(),
cal, mod, quality))
else:
val = libgedcom.make_gedcom_date(start, cal, mod, quality)
self._writeln(level, 'DATE', val)
elif date.get_text():
self._writeln(level, 'DATE', date.get_text())
else:
self._datewritten = False
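# Hedged examples with invented dates, at the level used when called from
# _dump_event_stats: a span becomes
#   2 DATE FROM 1 JAN 1900 TO 31 DEC 1910
# a range becomes
#   2 DATE BET 1 JAN 1900 AND 31 DEC 1910
# and a text-only date is written verbatim after the DATE tag.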
def _person_name(self, name, attr_nick):
"""
n NAME <NAME_PERSONAL> {1:1}
+1 NPFX <NAME_PIECE_PREFIX> {0:1}
+1 GIVN <NAME_PIECE_GIVEN> {0:1}
+1 NICK <NAME_PIECE_NICKNAME> {0:1}
+1 SPFX <NAME_PIECE_SURNAME_PREFIX {0:1}
+1 SURN <NAME_PIECE_SURNAME> {0:1}
+1 NSFX <NAME_PIECE_SUFFIX> {0:1}
+1 <<SOURCE_CITATION>> {0:M}
+1 <<NOTE_STRUCTURE>> {0:M}
"""
gedcom_name = name.get_gedcom_name()
firstname = name.get_first_name().strip()
surns = []
surprefs = []
for surn in name.get_surname_list():
surns.append(surn.get_surname().replace('/', '?'))
if surn.get_connector():
#we store connector with the surname
surns[-1] = surns[-1] + ' ' + surn.get_connector()
surprefs.append(surn.get_prefix().replace('/', '?'))
surname = ', '.join(surns)
surprefix = ', '.join(surprefs)
suffix = name.get_suffix()
title = name.get_title()
nick = name.get_nick_name()
if nick.strip() == '':
nick = attr_nick
self._writeln(1, 'NAME', gedcom_name)
if int(name.get_type()) == NameType.BIRTH:
pass
elif int(name.get_type()) == NameType.MARRIED:
self._writeln(2, 'TYPE', 'married')
elif int(name.get_type()) == NameType.AKA:
self._writeln(2, 'TYPE', 'aka')
else:
self._writeln(2, 'TYPE', name.get_type().xml_str())
if firstname:
self._writeln(2, 'GIVN', firstname)
if surprefix:
self._writeln(2, 'SPFX', surprefix)
if surname:
self._writeln(2, 'SURN', surname)
if name.get_suffix():
self._writeln(2, 'NSFX', suffix)
if name.get_title():
self._writeln(2, 'NPFX', title)
if nick:
self._writeln(2, 'NICK', nick)
self._source_references(name.get_citation_list(), 2)
self._note_references(name.get_note_list(), 2)
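# Sketch for a hypothetical married name with nickname "Jack", assuming
# get_gedcom_name() yields the usual "Given /Surname/" form:
#   1 NAME John /Smith/
#   2 TYPE married
#   2 GIVN John
#   2 SURN Smith
#   2 NICK Jack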
def _source_ref_record(self, level, citation_handle):
"""
n SOUR @<XREF:SOUR>@ /* pointer to source record */ {1:1}
+1 PAGE <WHERE_WITHIN_SOURCE> {0:1}
+1 EVEN <EVENT_TYPE_CITED_FROM> {0:1}
+2 ROLE <ROLE_IN_EVENT> {0:1}
+1 DATA {0:1}
+2 DATE <ENTRY_RECORDING_DATE> {0:1}
+2 TEXT <TEXT_FROM_SOURCE> {0:M}
+3 [ CONC | CONT ] <TEXT_FROM_SOURCE> {0:M}
+1 QUAY <CERTAINTY_ASSESSMENT> {0:1}
        +1 <<MULTIMEDIA_LINK>> {0:M}
+1 <<NOTE_STRUCTURE>> {0:M}
"""
citation = self.dbase.get_citation_from_handle(citation_handle)
src_handle = citation.get_reference_handle()
if src_handle is None:
return
src = self.dbase.get_source_from_handle(src_handle)
if src is None:
return
# Reference to the source
self._writeln(level, "SOUR", "@%s@" % src.get_gramps_id())
if citation.get_page() != "":
# PAGE <WHERE_WITHIN_SOURCE> can not have CONC lines.
# WHERE_WITHIN_SOURCE:= {Size=1:248}
                # Truncate the value to 248 characters and pass limit=248
                # so the line is never split
self._writeln(level+1, 'PAGE', citation.get_page()[0:248],
limit=248)
conf = min(citation.get_confidence_level(),
Citation.CONF_VERY_HIGH)
if conf != Citation.CONF_NORMAL and conf != -1:
self._writeln(level+1, "QUAY", QUALITY_MAP[conf])
if not citation.get_date_object().is_empty():
self._writeln(level+1, 'DATA')
self._date(level+2, citation.get_date_object())
if len(citation.get_note_list()) > 0:
note_list = [ self.dbase.get_note_from_handle(h)
for h in citation.get_note_list() ]
note_list = [ n for n in note_list
if n.get_type() == NoteType.SOURCE_TEXT]
if note_list:
ref_text = note_list[0].get()
else:
ref_text = ""
if ref_text != "" and citation.get_date_object().is_empty():
self._writeln(level+1, 'DATA')
if ref_text != "":
self._writeln(level+2, "TEXT", ref_text)
note_list = [ self.dbase.get_note_from_handle(h)
for h in citation.get_note_list() ]
note_list = [ n.handle for n in note_list
if n and n.get_type() != NoteType.SOURCE_TEXT]
self._note_references(note_list, level+1)
self._photos(citation.get_media_list(), level+1)
if "EVEN" in list(citation.get_data_map().keys()):
self._writeln(level+1, "EVEN", citation.get_data_map()["EVEN"])
if "EVEN:ROLE" in list(citation.get_data_map().keys()):
self._writeln(level+2, "ROLE",
citation.get_data_map()["EVEN:ROLE"])
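# Hypothetical citation as rendered above (IDs invented; QUAY assumes
# QUALITY_MAP follows the GEDCOM 0-3 certainty scale):
#   2 SOUR @S0042@
#   3 PAGE p. 17
#   3 QUAY 3
#   3 DATA
#   4 DATE 5 MAY 1870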
def _photo(self, photo, level):
"""
n OBJE {1:1}
+1 FORM <MULTIMEDIA_FORMAT> {1:1}
+1 TITL <DESCRIPTIVE_TITLE> {0:1}
+1 FILE <MULTIMEDIA_FILE_REFERENCE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
"""
photo_obj_id = photo.get_reference_handle()
photo_obj = self.dbase.get_object_from_handle(photo_obj_id)
if photo_obj:
mime = photo_obj.get_mime_type()
form = MIME2GED.get(mime, mime)
path = media_path_full(self.dbase, photo_obj.get_path())
if not os.path.isfile(path):
return
self._writeln(level, 'OBJE')
if form:
self._writeln(level+1, 'FORM', form)
self._writeln(level+1, 'TITL', photo_obj.get_description())
self._writeln(level+1, 'FILE', path, limit=255)
self._note_references(photo_obj.get_note_list(), level+1)
def _place(self, place, level):
"""
PLACE_STRUCTURE:=
n PLAC <PLACE_NAME> {1:1}
+1 FORM <PLACE_HIERARCHY> {0:1}
+1 FONE <PLACE_PHONETIC_VARIATION> {0:M} # not used
+2 TYPE <PHONETIC_TYPE> {1:1}
+1 ROMN <PLACE_ROMANIZED_VARIATION> {0:M} # not used
+2 TYPE <ROMANIZED_TYPE> {1:1}
+1 MAP {0:1}
+2 LATI <PLACE_LATITUDE> {1:1}
+2 LONG <PLACE_LONGITUDE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
"""
if place is None: return
place_name = place.get_title()
self._writeln(level, "PLAC", place_name.replace('\r', ' '), limit=120)
longitude = place.get_longitude()
latitude = place.get_latitude()
if longitude and latitude:
(latitude, longitude) = conv_lat_lon(latitude, longitude, "GEDCOM")
if longitude and latitude:
self._writeln(level+1, "MAP")
self._writeln(level+2, 'LATI', latitude)
self._writeln(level+2, 'LONG', longitude)
# The Gedcom standard shows that an optional address structure can
# be written out in the event detail.
# http://homepages.rootsweb.com/~pmcbride/gedcom/55gcch2.htm#EVENT_DETAIL
location = place.get_main_location()
if location and not location.is_empty():
self._writeln(level, "ADDR", location.get_street())
if location.get_street():
self._writeln(level + 1, 'ADR1', location.get_street())
if location.get_locality():
self._writeln(level + 1, 'ADR2', location.get_locality())
if location.get_city():
self._writeln(level + 1, 'CITY', location.get_city())
if location.get_state():
self._writeln(level + 1, 'STAE', location.get_state())
if location.get_postal_code():
self._writeln(level + 1, 'POST', location.get_postal_code())
if location.get_country():
self._writeln(level + 1, 'CTRY', location.get_country())
if location.get_phone():
self._writeln(level, 'PHON', location.get_phone())
self._note_references(place.get_note_list(), level+1)
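# Sketch for a hypothetical place with coordinates, at level 2 (the level
# used from _dump_event_stats); values invented:
#   2 PLAC Springfield, Hampden, Massachusetts, USA
#   3 MAP
#   4 LATI N42.101500
#   4 LONG W72.589800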
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
def export_data(database, filename, user, option_box=None):
"""
External interface used to register with the plugin system.
"""
ret = False
try:
ged_write = GedcomWriter(database, user, option_box)
ret = ged_write.write_gedcom_file(filename)
except IOError as msg:
msg2 = _("Could not create %s") % filename
user.notify_error(msg2, str(msg))
except DatabaseError as msg:
user.notify_db_error(_("Export failed"), str(msg))
return ret
|
Forage/Gramps
|
gramps/plugins/export/exportgedcom.py
|
Python
|
gpl-2.0
| 54,688
|
[
"Brian"
] |
14f94dae93c20f7ed8d7692545b90ce7ae58b2c58716cbf47be0e7fdb2bbbabb
|
"""
Uniweb validator project
wave.py : Checks URL against WAVE validator in various ways
Copyright (c) 2009 Brian Shumate
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import datetime
import httplib2
import urllib2
from datetime import timedelta
from BeautifulSoup import BeautifulSoup
from surfbot.validator.models import Website
def checkaccess(site_pk):
"""
    Checks URL against the WAVE validator website for accessibility
FIXME: how do we internalize this?!
"""
today = datetime.date.today()
wave = "http://wave.webaim.org/report?url="
w = Website.objects.get(pk=site_pk)
w.checkok = False
w.accessval = 'Fail'
w.lastcheck = today
week = timedelta(days=7)
day = timedelta(hours=24)
h = httplib2.Http(".cache")
resp, content = h.request(wave + w.rooturl, "GET")
validator = BeautifulSoup(content)
if validator.find('h1', id='wave4errormessage'):
w.checkok = True
w.nextcheck = today + day
w.checktotal += 1
w.accessval = 'Fail'
w.accessval_fcount += 1
w.save()
return w.accessval
else:
w.checkok = True
w.nextcheck = today + week
w.checktotal += 1
w.accessval = 'Pass'
w.accessval_pcount += 1
w.save()
return w.accessval
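# Hedged usage sketch (field names as used above; calling with a model
# instance's primary key is an assumption about the caller):
#   result = checkaccess(website.pk)
# fetches http://wave.webaim.org/report?url=<rooturl>, parses the report
# with BeautifulSoup, updates the Website row's bookkeeping (next check in
# a day on 'Fail', a week on 'Pass') and returns the new accessval.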
|
brianshumate/uniweb
|
surfbot/utils/wave.py
|
Python
|
bsd-2-clause
| 2,287
|
[
"Brian"
] |
6f1a57bc573e09cf7158663ed1b2357649466776ffaf14d3fb69e97f8756dfcb
|
# coding=utf-8
import asyncio
import webbrowser
import logging
import os
import sys
import time
from random import randint
from flask import Flask, render_template_string, send_from_directory, request
from amber.web_modules.sockets import Socket
from amber.web_modules.web_utils import threaded
MODULE_DIR = os.path.abspath(os.path.dirname(__file__))
FRONTEND_DIR = os.path.join(MODULE_DIR, "..", "frontend")
GAME_DIR = os.path.abspath(os.path.dirname(sys.argv[0]))
HOST = "localhost"
FLASK_PORT = randint(8560, 8566)
SOCKET_PORT = randint(8567, 8573)
app = Flask(__name__)
loop = asyncio.get_event_loop()
# socket = Socket(loop, HOST, SOCKET_PORT)
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
logging.getLogger("werkzeug").setLevel(logging.WARNING)
@app.route("/")
def main_page():
with open(os.path.join(FRONTEND_DIR, "index.html"), "r") as file:
return render_template_string(file.read(), host=HOST, port=SOCKET_PORT)
# ONLY FOR DEVELOPMENT!
@app.after_request
def add_header(r):
"""
Removes caching
"""
if request.path != "/":
return r
r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate, max-age=0"
r.headers["Pragma"] = "no-cache"
r.headers["Expires"] = "0"
# r.headers['Cache-Control'] = 'public, max-age=0'
return r
@app.route("/<path:url>")
def simplify(url: str):
if url.startswith(("assets/", "/assets")):
l_path, filename = os.path.split(url)
return send_from_directory(os.path.join(FRONTEND_DIR, l_path), filename)
else:
f_path, fn = os.path.split(os.path.join(GAME_DIR, url))
return send_from_directory(f_path, fn)
@threaded
def _run_flask():
app.run(debug=False, host=HOST, port=FLASK_PORT)
def run_web(amber_inst, open_browser=True):
try:
log.info("Starting flask server...")
_run_flask()
log.info("Flask server running")
socket = Socket(amber_inst, loop, HOST, SOCKET_PORT)
page_url = "http://{}:{}".format(HOST, FLASK_PORT)
# no async here
@threaded
def open_br():
time.sleep(1)
webbrowser.open(page_url)
if open_browser:
open_br()
log.info("Serving to {}".format(page_url))
loop.create_task(socket.start(amber_inst))
loop.run_forever()
except:
log.critical("Loop exception raised, exiting")
raise
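# Minimal usage sketch (amber_inst stands for whatever object Socket
# expects; that type is not defined in this module):
#   from amber.web_modules.web_core import run_web
#   run_web(amber_inst, open_browser=False)
# starts Flask in a background thread, then blocks in loop.run_forever()
# serving the websocket until the loop is stopped.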
|
DefaltSimon/Amber
|
amber/web_modules/web_core.py
|
Python
|
mit
| 2,425
|
[
"Amber"
] |
f2af18f26e73fcb582db24e7cd8238d679a4876917bf23640eecd4d26dd930a4
|
import pytest
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR, convertToReturnValue, returnValueOrRaise
def test_Ok():
retVal = S_OK("Hello world")
assert retVal["OK"] is True
assert retVal["Value"] == "Hello world"
def test_Error():
retVal = S_ERROR("This is bad")
assert retVal["OK"] is False
assert retVal["Message"] == "This is bad"
callStack = "".join(retVal["CallStack"])
assert "Test_ReturnValues" in callStack
assert "test_Error" in callStack
def test_ErrorWithCustomTraceback():
retVal = S_ERROR("This is bad", callStack=["My callstack"])
assert retVal["OK"] is False
assert retVal["Message"] == "This is bad"
assert retVal["CallStack"] == ["My callstack"]
class CustomException(Exception):
pass
@convertToReturnValue
def _happyFunction():
return {"12345": "Success"}
@convertToReturnValue
def _sadFunction():
raise CustomException("I am sad")
return {}
def test_convertToReturnValue():
retVal = _happyFunction()
assert retVal["OK"] is True
assert retVal["Value"] == {"12345": "Success"}
# Make sure exceptions are captured correctly
retVal = _sadFunction()
assert retVal["OK"] is False
assert "CustomException" in retVal["Message"]
# Make sure the exception is re-raised
with pytest.raises(CustomException):
returnValueOrRaise(_sadFunction())
|
DIRACGrid/DIRAC
|
src/DIRAC/Core/Utilities/test/Test_ReturnValues.py
|
Python
|
gpl-3.0
| 1,392
|
[
"DIRAC"
] |
0383d02f07aa19714c50a36c7c61aaa2c8152faffede30308572b1ccb29275a9
|
import cairo
import pango
import pangocairo as pc
import gtk
import math
from .structures import Size, Position, Rectangle, Color, BorderRadius, Padding, Gradient, RadialGradient
from ..events.events import WindowEventSource
from ..logger import log
from datetime import datetime
from jgui.settings import DEBUG as debug
class Surface(WindowEventSource):
def __init__(self, size=None, context=None, data=None, render_mouse=True, show_fps=False):
super(Surface, self).__init__()
self.show_fps = show_fps
self.size = Size.from_value(size)
self.mouse_pos = Position()
self.drawing = False
if context is None:
if data is not None:
self.csurface = cairo.ImageSurface.create_for_data(data, cairo.FORMAT_ARGB32, self.size.width, self.size.height)
else:
self.csurface = cairo.ImageSurface(cairo.FORMAT_ARGB32, self.size.width, self.size.height)
self.context = cairo.Context(self.csurface)
else:
self.context = context
self.root_window = Window('root', position=Position(0,0), size=self.size, context=self.context, surface=self, ignore_debug=True)
self.render_mouse = render_mouse
if self.render_mouse:
self.mouse_icon = Mouse('mouse', position=Position(0, 0), size=Size(12,20), context=self.context, surface=self)
self.fps_counter = TextWindow('fps', '0 fps', position=Position(self.size.width-70, 10), size=Size(80,20), context=self.context, surface=self)
self.windows = [self.root_window, self.mouse_icon, self.fps_counter]
self.current_hover_window = self.root_window
self.current_focused_window = self.root_window
self.dt = 0
self.old_time = datetime.now()
self.dtindex = 0
self.max_samples = 60
self.dtlist = [0]*self.max_samples
self.accept('mouse-move', self.process_mouse_move)
def setTopZero(self, context):
context.identity_matrix()
matrix = cairo.Matrix(1, 0, 0,
1, 0, 0)
context.transform(matrix)
def inject_mouse_down(self, button):
self.current_focused_window = self.current_hover_window
self.root_window.inject_mouse_down(button)
def inject_mouse_double(self, button):
self.root_window.inject_mouse_double(button)
def inject_mouse_wheel(self, value):
self.root_window.inject_mouse_wheel(value)
def inject_mouse_up(self, button):
self.root_window.inject_mouse_up(button)
def inject_mouse_position(self, pos):
mouse_pos = Position.from_value(pos)
diff = mouse_pos - self.mouse_pos
old_pos = self.mouse_pos
self.mouse_pos = mouse_pos
if diff != Position(0,0):
self.dispatch('mouse-move', self, old_pos, self.mouse_pos)
if self.render_mouse:
self.mouse_icon.position = pos
self.root_window.inject_mouse_position(pos)
def mouse_inside(self):
"""
Checks if the mouse is inside the window taking into account
all other windows and draw priorities.
"""
#Check all children of root to see if there is a higher priority
#window than the current one
root = self.root_window
stack = [root]
visited = set()
while stack:
item = stack[-1]
rec = item.rectangle
clip_parent = item.get_clip_parent()
if clip_parent is not None:
rec = item.rectangle.intersection(clip_parent.rectangle)
intersects_mouse = rec.intersects_with(self.mouse_pos)
if item.children and not set(item.children).issubset(visited):
stack.extend(item.children)
else:
if intersects_mouse:
return item
visited.add(item)
stack.pop()
return None
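    # Traversal sketch (a reading of the loop above, not from the source):
    # windows are visited with an explicit stack in post-order, so every
    # child is tested before its parent; among siblings, the ones pushed
    # last (drawn on top) are tested first, and a window clipped by an
    # ancestor only matches within the intersection of both rectangles.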
def process_mouse_move(self, obj, old_mpos, new_mpos):
if not self.current_hover_window or not self.current_hover_window.mouse_down:
self.current_hover_window = self.mouse_inside()
def notify_window_resize(self, width, height):
self.size = Size(width, height)
self.root_window.size = self.size
self.csurface = cairo.ImageSurface(cairo.FORMAT_ARGB32, self.size.width, self.size.height)
self.context = cairo.Context(self.csurface)
stack = self.windows[:]
while stack:
item = stack.pop()
item.context = self.context
if item.children:
stack.extend(item.children)
def calcfps(self, dt):
self.dtlist[self.dtindex] = float(dt)
self.dtindex += 1
if self.dtindex >= self.max_samples:
self.dtindex = 0
return 1.0/(sum(self.dtlist)/float(self.max_samples))
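    # Worked example with invented numbers: with max_samples = 60 and a
    # steady 0.016 s frame time, sum(self.dtlist) is 0.96, the mean dt is
    # 0.016, and calcfps reports 1.0 / 0.016 = 62.5 fps.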
def draw(self):
self.drawing = True
if self.show_fps:
self.dt = (datetime.now() - self.old_time).total_seconds()
self.old_time = datetime.now()
fps = self.calcfps(self.dt)
self.fps_counter.text = '{} fps'.format(round(fps))
self.context.set_operator(cairo.OPERATOR_CLEAR)
self.context.rectangle(0.0, 0.0, self.size.width, self.size.height)
self.context.fill()
self.context.set_operator(cairo.OPERATOR_OVER)
for window in self.windows:
window.draw()
self.drawing = False
class WindowSurface(object):
line_joins = {'miter': cairo.LINE_JOIN_MITER,
'round': cairo.LINE_JOIN_ROUND,
'bevel': cairo.LINE_JOIN_BEVEL}
line_caps = {'round': cairo.LINE_CAP_ROUND,
'butt': cairo.LINE_CAP_BUTT,
'square': cairo.LINE_CAP_SQUARE}
font_weights = {'bold': pango.WEIGHT_BOLD,
'normal': pango.WEIGHT_NORMAL,
'book': pango.WEIGHT_BOOK,
'heavy': pango.WEIGHT_HEAVY,
'light': pango.WEIGHT_LIGHT,
'medium': pango.WEIGHT_MEDIUM,
'semibold': pango.WEIGHT_SEMIBOLD,
'thin': pango.WEIGHT_THIN,
'ultrabold': pango.WEIGHT_ULTRABOLD,
'ultraheavy': pango.WEIGHT_ULTRAHEAVY,
'ultralight': pango.WEIGHT_ULTRALIGHT}
font_styles = {'italic': pango.STYLE_ITALIC,
'oblique': pango.STYLE_OBLIQUE,
'normal': pango.STYLE_NORMAL}
wrap_modes = {'word': pango.WRAP_WORD,
'char': pango.WRAP_CHAR,
'word_char': pango.WRAP_WORD_CHAR}
font_map = pc.cairo_font_map_get_default()
font_list = [f.get_name() for f in font_map.list_families()]
filters = {'none': cairo.FILTER_FAST,
'good': cairo.FILTER_GOOD,
'best': cairo.FILTER_BEST,
'bilinear': cairo.FILTER_BILINEAR,
'gaussian': cairo.FILTER_GAUSSIAN,
'nearest' : cairo.FILTER_NEAREST}
def __init__(self):
super(WindowSurface, self).__init__()
def load_image(self, image_path):
if image_path is not None:
if isinstance(image_path, basestring):
return gtk.gdk.pixbuf_new_from_file(image_path)
else:
return image_path
def draw_circle(self, position, size, color=(1,1,1,1), line_width=1.0, line_color=(0,0,0,1), start_angle=0.0, end_angle=360.0):
color = Color.from_value(color)
line_color = Color.from_value(line_color)
context = self.surface.context
position = Position.from_value(position)
size = Size.from_value(size)
width = size.width
height = size.height
x,y = (self.position.x+position.x+width/2,
self.position.y+position.y+height/2)
context.set_line_width(line_width)
context.save()
context.translate(x, y)
context.scale(width/2.0-line_width/2.0, height/2.0-line_width/2.0)
        # both angles are given in degrees, so convert both to radians
        context.arc(0, 0, 1, start_angle * math.pi/180.0, end_angle * math.pi/180.0)
context.restore()
context.set_source_rgba(color.r, color.g, color.b, color.a)
context.fill_preserve()
context.set_source_rgba(line_color.r, line_color.g, line_color.b, line_color.a)
context.stroke()
def draw_image(self, image, position, size,
filter='none',
stretch_horizontal=False,
stretch_vertical=False,
keep_ratio=False,
center_horizontal=True,
center_vertical=True, image_offset=(0, 0)):
context = self.surface.context
position = Position.from_value(position)
offset = Position.from_value(image_offset)
size = Size.from_value(size)
width = size.width
height = size.height
x,y = (self.position.x+position.x,
self.position.y+position.y)
if isinstance(image, basestring):
image = self.load_image(image)
im_width = image.get_width()
im_height = image.get_height()
new_height = height
new_width = width
if keep_ratio:
aspect_ratio = im_width/float(im_height)
if width >= height:
if im_height < im_width:
new_height = width/aspect_ratio
else:
new_width = aspect_ratio * height
else:
if im_height > im_width:
new_width = aspect_ratio * height
else:
new_height = width/aspect_ratio
if center_horizontal:
x += width/2.0 - new_width/2.0
if x < self.position.x:
x = self.position.x
if center_vertical:
y += height/2.0 - new_height/2.0
if y < self.position.y:
y = self.position.y
im_surf = cairo.ImageSurface(cairo.FORMAT_ARGB32, int(new_width), int(new_height))
sp = cairo.SurfacePattern(im_surf)
ct2 = cairo.Context(im_surf)
ct2.set_source(sp)
ct3 = gtk.gdk.CairoContext(ct2)
new_scale_x = 1
new_scale_y = 1
if stretch_horizontal or keep_ratio:
new_scale_x = new_width/float(im_width)
if stretch_vertical or keep_ratio:
new_scale_y = new_height/float(im_height)
ct3.scale(new_scale_x, new_scale_y)
ct3.set_source_pixbuf(image, -offset.x, -offset.y)
ct3.get_source().set_filter(self.filters[filter])
ct3.paint()
context.set_source_surface(im_surf,x,y)
context.paint()
def draw_text(self, text, position, font_size=12,
font_weight='normal',
font_style='normal', font_color=(0,0,0,1),
font_family='Sans', word_wrap='word',
alignment=pango.ALIGN_LEFT, line_width=1.0,
background_color=(1,1,1,0), fill_color=None):
color = Color.from_value(font_color)
background_color = Color.from_value(background_color)
position = Position.from_value(position)
context = self.surface.context
font_weight = self.font_weights[font_weight]
font_style = self.font_styles[font_style]
pc_context = pc.CairoContext(context)
pc_context.set_antialias(cairo.ANTIALIAS_SUBPIXEL)
layout = pc_context.create_layout()
font = pango.FontDescription('{} {}'.format(font_family, font_size))
font.set_weight(font_weight)
font.set_style(font_style)
layout.set_font_description(font)
layout.set_text(text)
layout.set_wrap(self.wrap_modes[word_wrap])
width = self.size.width - (self.padding.left + self.padding.right)
layout.set_width(int(width*pango.SCALE))
layout.set_alignment(alignment)
context.set_line_width(line_width)
        # use the converted Color object; the raw font_color argument may
        # be a plain tuple without .r/.g/.b/.a attributes
        context.set_source_rgba(color.r, color.g, color.b, color.a)
extents = context.text_extents(text)
x,y = (self.position.x+position.x+self.padding.left,
self.position.y+position.y+self.padding.top)
context.move_to(x, y)
pc_context.update_layout(layout)
pc_context.show_layout(layout)
def draw_lines(self, lines, line_color=(0,0,0,1), background_color=(1,1,1,1), line_width=1, line_join='miter', line_cap='butt'):
if lines:
line_color = Color.from_value(line_color)
background_color = Color.from_value(background_color)
context = self.surface.context
start_pos = self.position + lines[0]
context.set_line_width(line_width)
context.move_to(start_pos.x, start_pos.y)
for line in lines[1:]:
next_pos = self.position+line
context.line_to(next_pos.x, next_pos.y)
context.close_path()
            # the join/cap lookups were swapped: set_line_join takes the
            # line_joins entry and set_line_cap the line_caps entry
            try:
                context.set_line_join(self.line_joins[line_join])
            except KeyError:
                pass
            try:
                context.set_line_cap(self.line_caps[line_cap])
            except KeyError:
                pass
context.set_source_rgba(background_color.r, background_color.g, background_color.b, background_color.a)
context.fill_preserve()
context.set_source_rgba(line_color.r, line_color.g, line_color.b, line_color.a)
context.stroke()
def render_radial_gradient(self, gradient, inner_radius=None, outer_radius=None):
context = self.surface.context
position = self.position
size = self.size
gradient = RadialGradient.from_value(gradient)
rp = cairo.RadialGradient(gradient.start_position.x*float(size.width) + position.x,
gradient.start_position.y*float(size.height) + position.y,
inner_radius or gradient.inner_radius,
gradient.end_position.x*float(size.width) + position.x,
gradient.end_position.y*float(size.height) + position.y,
outer_radius or gradient.outer_radius)
for gstop in gradient.stops:
rp.add_color_stop_rgba(gstop.offset, gstop.color.r, gstop.color.g,
gstop.color.b, gstop.color.a)
if gradient.stops:
context.save()
context.set_source(rp)
context.fill_preserve()
context.restore()
def render_linear_gradient(self, gradient):
context = self.surface.context
position = self.position
size = self.size
gradient = Gradient.from_value(gradient)
lp = cairo.LinearGradient(gradient.start_position.x*float(size.width) + position.x,
gradient.start_position.y*float(size.height) + position.y,
gradient.end_position.x*float(size.width) + position.x,
gradient.end_position.y*float(size.height) + position.y)
for gstop in gradient.stops:
lp.add_color_stop_rgba(gstop.offset, gstop.color.r, gstop.color.g,
gstop.color.b, gstop.color.a)
if gradient.stops:
context.save()
context.set_source(lp)
context.fill_preserve()
context.restore()
def draw_rounded_rect(self, position, size, background_color=(1,1,1), line_width=1, line_color=(0,0,0), corner_radius=0, line_dashed=False, clip=False, gradient=()):
position = Position.from_value(position)
size = Size.from_value(size)
background_color = Color.from_value(background_color)
line_color = Color.from_value(line_color)
corner_radius = BorderRadius.from_value(corner_radius)
gradient = Gradient.from_value(gradient)
context = self.surface.context
radius = corner_radius
degrees = math.pi / 180.0
x = position.x + self.position.x
y = position.y + self.position.y
width = size.width
height = size.height
if clip: #clips the entire region so any child windows will be confined to the parent
context.new_path()
context.rectangle(x, y, width, height)
context.clip()
context.new_path()
context.arc(x + width - radius.topright - line_width/2.0,
y + radius.topright + line_width/2.0,
radius.topright, -90 * degrees, 0 * degrees)
context.arc(x + width - radius.bottomright - line_width/2.0,
y + height - radius.bottomright - line_width/2.0,
radius.bottomright, 0 * degrees, 90 * degrees)
context.arc(x + radius.bottomleft + line_width/2.0,
y + height - radius.bottomleft - line_width/2.0,
radius.bottomleft, 90 * degrees, 180 * degrees)
context.arc(x + radius.topleft + line_width/2.0,
y + radius.topleft + line_width/2.0,
radius.topleft, 180 * degrees, 270 * degrees)
context.close_path()
if gradient.stops:
if gradient._type == 'linear':
self.render_linear_gradient(gradient)
elif gradient._type == 'radial':
self.render_radial_gradient(gradient)
else:
context.set_source_rgba(background_color.r, background_color.g, background_color.b, background_color.a)
else:
context.set_source_rgba(background_color.r, background_color.g, background_color.b, background_color.a)
context.fill_preserve()
context.set_source_rgba(line_color.r, line_color.g, line_color.b, line_color.a)
context.set_line_width(line_width)
context.save()
if line_dashed:
context.set_dash([line_width, line_width])
context.stroke()
context.restore()
        if clip: #re-clip to the inner rounded path (inside the border) so child windows stay confined
context.new_path()
context.arc(x + width - radius.topright - line_width/2.0,
y + radius.topright + line_width/2.0,
radius.topright-self.border_width/2.0, -90 * degrees, 0 * degrees)
context.arc(x + width - radius.bottomright - line_width/2.0,
y + height - radius.bottomright - line_width/2.0,
radius.bottomright-self.border_width/2.0, 0 * degrees, 90 * degrees)
context.arc(x + radius.bottomleft + line_width/2.0,
y + height - radius.bottomleft - line_width/2.0,
radius.bottomleft-self.border_width/2.0, 90 * degrees, 180 * degrees)
context.arc(x + radius.topleft + line_width/2.0,
y + radius.topleft + line_width/2.0,
radius.topleft-self.border_width/2.0, 180 * degrees, 270 * degrees)
context.close_path()
context.clip()
def render(self):
if debug and not self.ignore_debug:
self.draw_rounded_rect([0,0], [self.size.width, self.size.height], background_color=(0,0,1,0.1), line_color=(0,0,1,0.4), line_width=self.border_width+0.5, corner_radius=self.border_radius, line_dashed=True)
def draw(self):
if self.visible:
self.surface.context.save()
self.render()
for child in self.children:
child.draw()
self.surface.context.restore()
class Window(WindowEventSource, WindowSurface):
def __init__(self, name, **kwargs):
super(Window, self).__init__()
self._draggable = False
self._resizable = False
self._root = None
self.name = name
position = Position.from_value(kwargs.pop('position', Position()))
size = Size.from_value(kwargs.pop('size', Size()))
self._surface = kwargs.pop('surface', None)
self.min_size = Size.from_value(kwargs.pop('min_size', Size(1,1)))
self.max_size = Size.from_value(kwargs.pop('max_size', Size(-1,-1)))
self.corner_handle_size = Size.from_value(kwargs.pop('corner_handle_size', Size(20, 20)))
self.edge_handle_width = kwargs.pop('edge_handle_width', 10)
self.edge_handle_buffer = Size.from_value(kwargs.pop('edge_handle_buffer', Size(5, 5)))
self.border_width = kwargs.pop('border_width', 1)
self.border_color = Color.from_value(kwargs.pop('border_color', (0,0,0,0)))
self.background_color = Color.from_value(kwargs.pop('background_color', (0,0,0,0)))
self.background_image = self.load_image(kwargs.pop('background_image', None))
self.background_image_filter = kwargs.pop('background_image_filter','none')
self.background_image_stretch_horizontal = kwargs.pop('background_image_stretch_horizontal', False)
self.background_image_stretch_vertical = kwargs.pop('background_image_stretch_vertical', False)
self.background_image_keep_ratio = kwargs.pop('background_image_keep_ratio', False)
self.background_image_center_horizontal = kwargs.pop('background_image_center_horizontal', True)
self.background_image_center_vertical = kwargs.pop('background_image_center_vertical', True)
self.background_image_offset = Position.from_value(kwargs.pop('background_image_offset', (0, 0)))
self.gradient = Gradient.from_value(kwargs.pop('gradient', ()))
self.border_radius = BorderRadius.from_value(kwargs.pop('border_radius', 1))
self.padding = Padding.from_value(kwargs.pop('padding', 0))
self.dashed_border = kwargs.pop('dashed_border', False)
self.clip_children = kwargs.pop('clip_children', False)
self.ignore_debug = kwargs.pop('ignore_debug', False)
self.rectangle = Rectangle()
self.children = []
self.parent = None
self.mouse_pos = Position(size.width/2, size.height/2)
self.mouse_diff = Position(0, 0)
self.mouse_in = False
self.mouse_hover = False
self.mouse_down = False
self.mouse_inputs = dict.fromkeys(self.mouse_button_down_events, False)
self.focused = False
self.visible = True
self.accept('mouse-move', self.process_mouse_move)
self.size = size
self.position = position
self.surface = self._surface
self.resizable = kwargs.pop('resizable', self._resizable)
self.draggable = kwargs.pop('draggable', self._draggable)
for key, value in kwargs.items():
setattr(self, key, value)
def render(self):
super(Window, self).render()
self.draw_rounded_rect([0,0], [self.size.width, self.size.height],
background_color=self.background_color,
line_color=self.border_color,
line_width=self.border_width,
corner_radius=self.border_radius,
line_dashed=self.dashed_border,
clip=self.clip_children, gradient=self.gradient)
if self.background_image is not None:
self.draw_image(self.background_image, [0, 0], self.size,
filter=self.background_image_filter,
stretch_horizontal=self.background_image_stretch_horizontal,
stretch_vertical=self.background_image_stretch_vertical,
keep_ratio=self.background_image_keep_ratio,
center_horizontal=self.background_image_center_horizontal,
center_vertical=self.background_image_center_vertical,
image_offset=self.background_image_offset)
@property
def resizable(self):
return self._resizable
@resizable.setter
def resizable(self, value):
if not self._resizable and value:
self.init_resize_handles()
elif not value and self._resizable:
self.remove_resize_handles()
self._resizable = value
@property
def draggable(self):
return self._draggable
@draggable.setter
def draggable(self, value):
self._draggable = value
self.enable_drag(self._draggable)
def enable_drag(self, value):
if value:
self.accept('mouse-left-drag', self.drag)
self.accept('mouse-left', self.click)
self.accept('mouse-left-up', self.click_up)
else:
self.reject('mouse-left-drag', self.drag)
self.reject('mouse-left', self.click)
self.reject('mouse-left-up', self.click_up)
def _restrict_pos_size_height(self, new_pos, new_size):
if new_size.height <= self.min_size.height:
new_pos.y = self.position.y + self.size.height - self.min_size.height
new_size.height = self.min_size.height
if self.max_size.height > -1 and new_size.height >= self.max_size.height:
new_pos.y = self.position.y + self.size.height - self.max_size.height
new_size.height = self.max_size.height
return new_pos, new_size
def _restrict_pos_size_width(self, new_pos, new_size):
if new_size.width <= self.min_size.width:
new_pos.x = self.position.x + self.size.width - self.min_size.width
new_size.width = self.min_size.width
if self.max_size.width > -1 and new_size.width >= self.max_size.width:
new_pos.x = self.position.x + self.size.width - self.max_size.width
new_size.width = self.max_size.width
return new_pos, new_size
def _restrict_pos_size(self, new_pos, new_size):
self._restrict_pos_size_height(new_pos, new_size)
self._restrict_pos_size_width(new_pos, new_size)
return new_pos, new_size
def drag_bottomright_handle(self, obj, mouse_pos):
diff = mouse_pos - self.mouse_diff
old_size = Size.from_value(self.size)
self.size = self.size + diff
self.mouse_diff.y = obj.position.y + self.handle_diff.y
self.mouse_diff.x = obj.position.x + self.handle_diff.x
def drag_bottomleft_handle(self, obj, mouse_pos):
diff = mouse_pos - self.mouse_diff
old_size = Size.from_value(self.size)
new_pos = Position(self.position.x + diff.x, self.position.y)
new_size = Size(self.size.width - diff.x, self.size.height + diff.y)
new_pos, new_size = self._restrict_pos_size_width(new_pos, new_size)
if self.draggable:
self.position = new_pos
self.size = new_size
else:
self.size = [self.size.width, new_size.height]
self.mouse_diff.y = obj.position.y + self.handle_diff.y
self.mouse_diff.x = obj.position.x + self.handle_diff.x
def drag_topright_handle(self, obj, mouse_pos):
diff = mouse_pos - self.mouse_diff
old_size = Size.from_value(self.size)
new_pos = Position(self.position.x, self.position.y + diff.y)
new_size = Size(self.size.width + diff.x, self.size.height - diff.y)
new_pos, new_size = self._restrict_pos_size_height(new_pos, new_size)
if self.draggable:
self.position = new_pos
self.size = new_size
else:
self.size = [new_size.width, self.size.height]
self.mouse_diff.y = obj.position.y + self.handle_diff.y
self.mouse_diff.x = obj.position.x + self.handle_diff.x
def drag_topleft_handle(self, obj, mouse_pos):
diff = mouse_pos - self.mouse_diff
old_size = Size.from_value(self.size)
new_pos = Position(self.position.x + diff.x, self.position.y + diff.y)
new_size = Size(self.size.width - diff.x, self.size.height - diff.y)
new_pos, new_size = self._restrict_pos_size(new_pos, new_size)
if self.draggable:
self.position = new_pos
self.size = new_size
self.mouse_diff.y = obj.position.y + self.handle_diff.y
self.mouse_diff.x = obj.position.x + self.handle_diff.x
def drag_top_handle(self, obj, mouse_pos):
diff = mouse_pos - self.mouse_diff
old_size = Size.from_value(self.size)
new_pos = Position(self.position.x, self.position.y + diff.y)
new_size = Size(self.size.width, self.size.height - diff.y)
new_pos, new_size = self._restrict_pos_size(new_pos, new_size)
if self.draggable:
self.position = new_pos
self.size = new_size
self.mouse_diff.y = obj.position.y + self.handle_diff.y
self.mouse_diff.x = obj.position.x + self.handle_diff.x
def drag_left_handle(self, obj, mouse_pos):
diff = mouse_pos - self.mouse_diff
old_size = Size.from_value(self.size)
new_pos = Position(self.position.x+diff.x, self.position.y)
new_size = Size(self.size.width - diff.x, self.size.height)
new_pos, new_size = self._restrict_pos_size(new_pos, new_size)
if self.draggable:
self.position = new_pos
self.size = new_size
self.mouse_diff.x = obj.position.x + self.handle_diff.x
def drag_bottom_handle(self, obj, mouse_pos):
diff = mouse_pos - self.mouse_diff
old_height = self.size.height
self.size = Size(self.size.width, self.size.height+diff.y)
self.mouse_diff.y = obj.position.y + self.handle_diff.y
def drag_right_handle(self, obj, mouse_pos):
diff = mouse_pos - self.mouse_diff
old_width = self.size.width
self.size = Size(self.size.width + diff.x, self.size.height)
self.mouse_diff.x = obj.position.x + self.handle_diff.x
def remove_resize_handles(self):
self.remove_child(self.top_handle)
self.remove_child(self.topleft_handle)
self.remove_child(self.topright_handle)
self.remove_child(self.left_handle)
self.remove_child(self.right_handle)
self.remove_child(self.bottom_handle)
self.remove_child(self.bottomright_handle)
self.remove_child(self.bottomleft_handle)
self.top_handle = None
self.topleft_handle = None
self.topright_handle = None
self.left_handle = None
self.right_handle = None
self.bottom_handle = None
self.bottomright_handle = None
self.bottomleft_handle = None
def init_resize_handles(self):
self.handle_diff = Position(0,0)
self.top_handle = Window('top_handle')
self.top_handle.accept('drag', self.drag_top_handle)
self.top_handle.accept('mouse-left', self.handle_click)
self.top_handle.accept('mouse-left-up', self.handle_click_up)
self.topleft_handle = Window('topleft_handle')
self.topleft_handle.accept('drag', self.drag_topleft_handle)
self.topleft_handle.accept('mouse-left', self.handle_click)
self.topleft_handle.accept('mouse-left-up', self.handle_click_up)
self.topright_handle = Window('topright_handle')
self.topright_handle.accept('drag', self.drag_topright_handle)
self.topright_handle.accept('mouse-left', self.handle_click)
self.topright_handle.accept('mouse-left-up', self.handle_click_up)
self.right_handle = Window('right_handle')
self.right_handle.accept('drag', self.drag_right_handle)
self.right_handle.accept('mouse-left', self.handle_click)
self.right_handle.accept('mouse-left-up', self.handle_click_up)
self.bottomright_handle = Window('bottomright_handle')
self.bottomright_handle.accept('drag', self.drag_bottomright_handle)
self.bottomright_handle.accept('mouse-left', self.handle_click)
self.bottomright_handle.accept('mouse-left-up', self.handle_click_up)
self.bottom_handle = Window('bottom_handle')
self.bottom_handle.accept('drag', self.drag_bottom_handle)
self.bottom_handle.accept('mouse-left', self.handle_click)
self.bottom_handle.accept('mouse-left-up', self.handle_click_up)
self.bottomleft_handle = Window('bottomleft_handle')
self.bottomleft_handle.accept('drag', self.drag_bottomleft_handle)
self.bottomleft_handle.accept('mouse-left', self.handle_click)
self.bottomleft_handle.accept('mouse-left-up', self.handle_click_up)
self.left_handle = Window('left_handle')
self.left_handle.accept('drag', self.drag_left_handle)
self.left_handle.accept('mouse-left', self.handle_click)
self.left_handle.accept('mouse-left-up', self.handle_click_up)
self.vertical_edge_size = Size()
self.horizontal_edge_size = Size()
self.add_child(self.top_handle)
self.add_child(self.topleft_handle)
self.add_child(self.topright_handle)
self.add_child(self.left_handle)
self.add_child(self.right_handle)
self.add_child(self.bottom_handle)
self.add_child(self.bottomright_handle)
self.add_child(self.bottomleft_handle)
self.update_resize_handles()
def update_resize_handles(self):
buffer = self.edge_handle_buffer
top = self.top_handle
topleft = self.topleft_handle
topright = self.topright_handle
right = self.right_handle
bottomright = self.bottomright_handle
bottom = self.bottom_handle
bottomleft = self.bottomleft_handle
left = self.left_handle
vertical_edge_size = self.vertical_edge_size
horizontal_edge_size = self.horizontal_edge_size
corner_handle_size = self.corner_handle_size
x, y = self.position.x, self.position.y
vertical_edge_size.width = self.size.width - 2*corner_handle_size.width
vertical_edge_size.height = self.edge_handle_width
horizontal_edge_size.width = self.edge_handle_width
horizontal_edge_size.height = self.size.height - 2*corner_handle_size.height
top.position.x = x + corner_handle_size.width
top.position.y = y - buffer.height
top.size = vertical_edge_size + [0, buffer.height]
topleft.position.x = x - buffer.width
topleft.position.y = y - buffer.height
topleft.size = corner_handle_size + buffer
topright.position.x = x + self.size.width - corner_handle_size.width
topright.position.y = y - buffer.height
topright.size = corner_handle_size + buffer
bottom.position.x = x + corner_handle_size.width
bottom.position.y = y + self.size.height - vertical_edge_size.height
bottom.size = vertical_edge_size + [0, buffer.height]
bottomleft.position.x = x - buffer.width
bottomleft.position.y = y + self.size.height - corner_handle_size.height
bottomleft.size = corner_handle_size + buffer
bottomright.position.x = x + self.size.width-corner_handle_size.width
bottomright.position.y = y + self.size.height-corner_handle_size.height
bottomright.size = corner_handle_size + buffer
right.position.x = x + self.size.width-horizontal_edge_size.width
right.position.y = y + corner_handle_size.height
right.size = horizontal_edge_size + [buffer.width, 0]
left.position.x = x - buffer.width
left.position.y = y + corner_handle_size.height
left.size = horizontal_edge_size + [buffer.width, 0]
def drag(self, obj, mouse_pos):
obj.position = mouse_pos - self.mouse_diff
def click(self, obj, mouse_pos):
self.mouse_diff = mouse_pos - obj.position
def click_up(self, obj, mouse_pos):
self.mouse_diff = Position(0, 0)
def handle_click_up(self, obj, mouse_pos):
self.mouse_diff = Position(0, 0)
self.handle_diff = Position(0, 0)
self.dispatch('resize-end', self)
def handle_click(self, obj, mouse_pos):
self.mouse_diff = mouse_pos
self.handle_diff = mouse_pos - obj.position
self.dispatch('resize-start', self)
def show(self):
self.visible = True
def hide(self):
self.visible = False
def get_clip_parent(self):
parent = self.parent
l = []
while parent is not None:
if parent.clip_children:
l.append(parent)
parent = parent.parent
if l:
return l[-1]
def inject_mouse_down(self, button):
stack = [self]
while stack:
item = stack.pop()
if item.mouse_inside():
log(button, item.name)
item.mouse_inputs[button] = True
item.mouse_down = True
item.grab_focus()
item.dispatch(button, item, item.mouse_pos)
else:
item.release_focus()
if item.children:
stack.extend(item.children)
def inject_mouse_double(self, button):
stack = [self]
while stack:
item = stack.pop()
if item.mouse_inside():
log(button+'-double', item.name)
item.dispatch(button+'-double', item, item.mouse_pos)
if item.children:
stack.extend(item.children)
def inject_mouse_up(self, button):
stack = [self]
while stack:
item = stack.pop()
if item.mouse_held() and item.mouse_inputs[button]:
log(button+'-up', item.name)
item.mouse_down = False
item.mouse_inputs[button] = False
item.dispatch('{}-up'.format(button), item, item.mouse_pos)
if item.children:
stack.extend(item.children)
def inject_mouse_position(self, pos):
mouse_pos = Position.from_value(pos)
stack = [self]
while stack:
item = stack.pop()
diff = mouse_pos - item.mouse_pos
old_pos = item.mouse_pos
item.mouse_pos = mouse_pos
            if diff != Position(0, 0):
                # Pass the position just assigned to this item, not the
                # injector's own mouse position.
                item.dispatch('mouse-move', item, old_pos, item.mouse_pos)
                if item.focused and item.mouse_down:
                    for button, down in item.mouse_inputs.items():
                        if down:
                            item.dispatch('{}-drag'.format(button), item, item.mouse_pos)
                            item.dispatch('drag', item, item.mouse_pos)
if item.children:
stack.extend(item.children)
def inject_mouse_wheel(self, value):
stack = [self]
while stack:
item = stack.pop()
if item.mouse_inside():
log(item.name, 'scroll', value)
item.dispatch('scroll', item, value)
if item.children:
stack.extend(item.children)
def grab_focus(self):
self.focused = True
parent = self
        # Move this window and each of its ancestors to the end of its
        # siblings list so the focused chain is drawn on top.
while parent is not None:
if parent.parent is not None:
children = parent.parent.children[:]
children.remove(parent)
children.append(parent)
parent.parent.children = children
parent = parent.parent
self.dispatch('focus', self)
def release_focus(self):
if self.focused:
log('focus-lost', self.name)
self.focused = False
self.mouse_down = False
            # Clear events that may have been triggered, e.g. the user
            # clicks on one window, holds, and then releases on another.
for key in self.mouse_inputs:
self.mouse_inputs[key] = False
self.dispatch('focus-lost', self)
@property
def root(self):
if self._root is None:
parent = self.parent
if parent is None:
return self
while parent is not None:
root = parent
parent = parent.parent
self._root = root
return self._root
def mouse_inside(self):
return self is self.surface.current_hover_window
def mouse_held(self):
return self.mouse_down
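    # Synthesizes 'mouse-enter'/'mouse-leave' (and 'hover') events from raw
    # mouse movement, tracking state in self.mouse_in. Enter/leave are
    # suppressed while a button is held so drags keep their target.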
def process_mouse_move(self, obj, old_mpos, new_mpos):
mouse_focus = self.mouse_inside()
if not self.mouse_held():
if mouse_focus:
if not self.mouse_in:
log('mouse-enter', self.name)
self.dispatch('mouse-enter', self)
self.dispatch('hover', self)
self.mouse_in = True
else:
if self.mouse_in:
log('mouse-leave', self.name)
self.dispatch('mouse-leave', self)
self.mouse_in = False
@property
def surface(self):
return self._surface
@surface.setter
def surface(self, surface):
self._surface = surface
for child in self.children:
child.surface = self._surface
def add_child(self, child_window):
if child_window not in self.children:
child_window.parent = self
child_window.position = child_window.position + self.position +\
[self.border_width/2, self.border_width/2] +\
[self.padding.left, self.padding.top]
child_window.surface = self.surface
self.children.append(child_window)
def remove_child(self, child_window):
try:
self.children.remove(child_window)
child_window.parent = None
except ValueError:
pass
@property
def position(self):
return self.rectangle.position
@position.setter
def position(self, position):
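        # Moving a window shifts every descendant by the same delta, so the
        # whole subtree moves rigidly.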
position = Position.from_value(position)
diff = position - self.rectangle.position
if diff.x != 0 or diff.y != 0:
self.dispatch('move', self, position)
for child in self.children:
child.position = child.position + diff
self.rectangle.position = position
@property
def size(self):
return self.rectangle.size
@size.setter
def size(self, size):
size = Size.from_value(size)
if size.height <= self.min_size.height:
size.height = self.min_size.height
if size.width <= self.min_size.width:
size.width = self.min_size.width
if self.max_size.width > -1 and size.width >= self.max_size.width:
size.width = self.max_size.width
if self.max_size.height > -1 and size.height >= self.max_size.height:
size.height = self.max_size.height
diff = size - self.rectangle.size
if diff.height != 0 or diff.width != 0:
self.dispatch('resize', self, size)
            # Child resizing is intentionally disabled; the original idea was:
            # for child in self.children:
            #     child.size = child.size + diff
self.rectangle.size = size
if self.resizable:
self.update_resize_handles()
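# A minimal usage sketch of the event-injection API above. This is a
# hypothetical illustration, not part of the library: it assumes Window can
# be constructed from just a name and that a surface has already been
# attached (mouse_inside() consults self.surface.current_hover_window).
#
#     root = Window('root')
#     panel = Window('panel')
#     root.add_child(panel)
#     root.inject_mouse_position((40, 25))   # updates mouse_pos on every window
#     root.inject_mouse_down('left')         # dispatches 'left', focuses hits
#     root.inject_mouse_up('left')           # dispatches 'left-up'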
class Mouse(Window):
def __init__(self, *args, **kwargs):
super(Mouse, self).__init__(*args, **kwargs)
self.lines = [
[0, 0],
[0, self.size.height*0.85],
[self.size.width*0.32, self.size.height*0.675],
[self.size.width*0.52, self.size.height*0.9],
[self.size.width*0.72, self.size.height*0.85],
[self.size.width*0.52, self.size.height*0.62],
[self.size.width*0.92, self.size.height*0.6]
]
def render(self):
self.draw_lines(self.lines, line_width=self.size.height/20)
class TextWindow(Window):
    def __init__(self, name, text, *args, **kwargs):
        # Pop text-specific options before delegating to Window, so the
        # base class is not handed keyword arguments it does not own.
        self.font_size = kwargs.pop('font_size', 12)
        self.font_style = kwargs.pop('font_style', 'normal')
        self.font_weight = kwargs.pop('font_weight', 'normal')
        self.font_family = kwargs.pop('font_family', 'Sans')
        self.word_wrap = kwargs.pop('word_wrap', 'word')
        self.font_color = Color.from_value(kwargs.pop('font_color', (0, 0, 0, 1)))
        super(TextWindow, self).__init__(name, *args, **kwargs)
        self.text = text
def render(self):
super(TextWindow, self).render()
self.draw_text(self.text, [0,0],
self.font_size, font_weight=self.font_weight,
font_style=self.font_style, font_family=self.font_family,
font_color=self.font_color, word_wrap=self.word_wrap)
class ImageWindow(Window):
def __init__(self, name, image_path, *args, **kwargs):
super(ImageWindow, self).__init__(name, *args, **kwargs)
self.image_path = image_path
self.image = self.load_image(self.image_path)
def render(self):
self.draw_image(self.image, [0,0], self.size)
super(ImageWindow, self).render()
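# Hypothetical examples of the helper subclasses above (constructor
# signatures as defined here; sizes, text, and paths are illustrative only):
#
#     cursor = Mouse('cursor')
#     label = TextWindow('label', 'Hello, world', font_size=14)
#     icon = ImageWindow('icon', 'assets/icon.png')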
|
jyapayne/JGUI
|
jgui/surface/surface.py
|
Python
|
mit
| 45,131
|
[
"Gaussian"
] |
48484bcf24043a739bd4e915bc43de85ef04e08ec29ae7faabd1552f9132d21c
|
#! /usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-admin-set-site-protocols
# Author : Stuart Paterson
########################################################################
"""
Define the access protocols for each SE of a given site.
"""
from __future__ import print_function
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
Script.registerSwitch( "", "Site=", "Site for which protocols are to be set (mandatory)" )
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ... Protocol ...' % Script.scriptName,
'Arguments:',
' Protocol: SE access protocol (mandatory)' ] ) )
Script.parseCommandLine( ignoreErrors = True )
site = None
for switch in Script.getUnprocessedSwitches():
if switch[0].lower() == "site":
site = switch[1]
args = Script.getPositionalArgs()
if not site or not args:
Script.showHelp()
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
diracAdmin = DiracAdmin()
exitCode = 0
result = diracAdmin.setSiteProtocols( site, args, printOutput = True )
if not result['OK']:
print('ERROR: %s' % result['Message'])
exitCode = 2
DIRAC.exit( exitCode )
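# Example invocation (site and protocol names are illustrative only):
#   dirac-admin-set-site-protocols --Site=LCG.CERN.ch SRM2 DIP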
|
fstagni/DIRAC
|
Interfaces/scripts/dirac-admin-set-site-protocols.py
|
Python
|
gpl-3.0
| 1,389
|
[
"DIRAC"
] |
8dfeeea27986f495b24aef0b574eb87edef619519803c76e8048caf661a109b9
|