#!/usr/bin/env python
import os
import re
import sys
import time
import pysam
import align
import random
import argparse
import logging
import subprocess
import itertools
import traceback
import cPickle as pickle
import multiprocessing as mp
import numpy as np
import scipy.stats as ss
from uuid import uuid4
from string import maketrans
from operator import itemgetter
from collections import Counter
from collections import OrderedDict as od
from collections import defaultdict as dd
from bx.intervals.intersection import Intersecter, Interval # pip install bx-python
import profile
#######################################
## Classes ##
#######################################
class Genome:
def __init__(self, gfn):
''' gfn = genome file name (.fai or chrom, length tsv) '''
self.chrlen = {} # length of each chromosome
self.chrmap = [] # used for picking chromosomes
self.bp = 0
bins = 100000
with open(gfn, 'r') as g:
for line in g:
if not line.startswith('#'):
chrom, length = line.strip().split()[:2]
self.chrlen[chrom] = int(length)
self.bp += int(length)
for chrom, length in self.chrlen.iteritems():
self.chrmap += [chrom] * int(float(length) / float(self.bp) * bins)
def pick(self):
''' return a random chromosome and position '''
rchrom = random.choice(self.chrmap)
rpos = int(random.uniform(1, self.chrlen[rchrom]))
return rchrom, rpos
def addpad(self, interval, pad):
''' pad interval such that it doesn't go out of bounds '''
chrom, start, end = interval
start = int(start) - int(pad)
end = int(end) + int(pad)
assert chrom in self.chrlen, "error padding interval %s, %s not a known chromosome" % (str(interval), chrom)
if start < 0: start = 0
if end > self.chrlen[chrom]: end = self.chrlen[chrom]
return (chrom, start, end)
def chunk(self, n, seed=None, sorted=False, pad=0):
        ''' break genome into n roughly even-sized chunks; returns a flat list of (chrom, start, end) intervals '''
chunklen = int(self.bp/n)
chunks = []
intervals = []
chunkleft = chunklen # track how much genome needs to go into each chunk
chromlist = self.chrlen.keys()
if sorted:
chromlist.sort()
else:
if seed is not None: random.seed(seed)
random.shuffle(chromlist)
for chrom in chromlist:
length = self.chrlen[chrom]
lenleft = length
if length <= chunkleft:
chunkleft -= length
lenleft -= length
intervals.append( self.addpad((chrom, 0, length), pad) )
assert lenleft == 0
if chunkleft == 0:
chunkleft = chunklen
chunks.append(intervals)
intervals = []
else:
while lenleft > 0:
if lenleft >= chunkleft:
intervals.append( self.addpad((chrom, length-lenleft, length-lenleft+chunkleft), pad) )
lenleft -= chunkleft
chunkleft = chunklen
chunks.append(intervals)
intervals = []
else: # lenleft < chunkleft
intervals.append( self.addpad((chrom, length-lenleft, length), pad) )
chunkleft -= lenleft
                        lenleft = 0
return list(itertools.chain.from_iterable(chunks)) # flatten list
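# Usage sketch for Genome (hypothetical file name), assuming a samtools faidx
# index; chunk() is how main() below splits work across processes:
#   genome = Genome('ref.fa.fai')
#   chrom, pos = genome.pick()                           # length-weighted random position
#   intervals = genome.chunk(64, sorted=True, pad=5000)  # flat list of (chrom, start, end)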
class LASTResult:
def __init__(self, res):
self.raw = res
self.score = int(res[0].split()[1].replace('score=', ''))
self.target_id = res[1].split()[1]
self.target_start = int(res[1].split()[2])
self.target_alnsize = int(res[1].split()[3])
self.target_end = self.target_start + self.target_alnsize
self.target_strand = res[1].split()[4]
self.target_seqsize = int(res[1].split()[5])
self.target_align = res[1].split()[6]
self.query_id = res[2].split()[1]
self.query_distnum = 0
if '|' in self.query_id:
self.query_id = res[2].split()[1].split('|')[0]
self.query_distnum = int(res[2].split()[1].split('|')[-1]) # track which distal read this came from
self.query_start = int(res[2].split()[2])
self.query_alnsize = int(res[2].split()[3])
self.query_end = self.query_start + self.query_alnsize
self.query_strand = res[2].split()[4]
self.query_seqsize = int(res[2].split()[5])
self.query_align = res[2].split()[6]
def pct_match(self):
return float(sum([a.upper()==b.upper() for a,b in zip(list(self.query_align), list(self.target_align))])) / float(self.query_alnsize)
def __lt__(self, other):
return self.score > other.score
def __gt__(self, other):
return self.score < other.score
def __str__(self):
return "\n".join(self.raw)
class SortableRead:
def __init__(self, read):
self.read = read
self.seq = read.seq
self.seqstart = read.reference_start-read.query_alignment_start
def __gt__(self, other):
if self.read.tid == other.read.tid:
return self.seqstart > other.seqstart
else:
return self.read.tid > other.read.tid
class SplitRead:
''' store information about split read alignment '''
def __init__(self, chrom, read, bamfn):
self.uuid = str(uuid4())
self.chrom = chrom
self.read = read
self.bamfn = os.path.basename(bamfn)
self.cliplen = len(read.seq) - len(read.query_alignment_sequence)
self.breakleft = False
self.breakright = False
self.breakpos = None
if read.qstart < read.rlen - read.qend:
self.breakpos = read.get_reference_positions()[-1] # breakpoint on right
self.breakright = True
else:
self.breakpos = read.get_reference_positions()[0] # breakpoint on left
self.breakleft = True
assert self.breakpos is not None
assert self.breakleft != self.breakright
def getRG(self):
''' return read group from RG aux tag '''
for tag, val in self.read.tags:
if tag == 'RG': return val
return None
def __gt__(self, other):
''' enables sorting of SplitRead objects '''
if self.chrom == other.chrom:
return self.breakpos > other.breakpos
else:
return self.chrom > other.chrom
def __str__(self):
dir = 'left'
if self.breakright: dir='right'
return ' '.join(map(str, ('SplitRead:', self.chrom, self.breakpos, self.cliplen, self.read.qname, dir)))
class DiscoRead:
''' store information about discordant pair alignment '''
def __init__(self, chrom, read, bamfn, mate_chrom=None):
self.chrom = chrom
self.read = read
self.bamfn = os.path.basename(bamfn)
self.mate_chrom = mate_chrom # can be None
self.mate_read = None # set later
def mate_mapped(self):
return self.mate_read is not None and not self.mate_read.is_unmapped
def getRG(self):
''' return read group from RG aux tag '''
for tag, val in self.read.tags:
if tag == 'RG': return val
return None
def __gt__(self, other):
if self.mate_mapped() and other.mate_mapped():
return self.mate_read.get_reference_positions()[0] > other.mate_read.get_reference_positions()[0]
else:
return self.read.get_reference_positions()[0] > other.read.get_reference_positions()[0]
def __str__(self):
        return ' '.join(map(str, (self.chrom, self.read, self.mate_chrom, self.mate_read)))
class ReadCluster:
''' parent class for read clusters '''
def __init__(self, firstread=None):
self.uuid = str(uuid4())
self.reads = []
self.start = 0
self.end = 0
self.median = 0
self.chrom = None
if firstread is not None:
self.add_read(firstread)
def add_read(self, r):
''' add a read and update '''
self.reads.append(r)
if self.chrom is None: self.chrom = r.chrom
assert self.chrom == r.chrom # clusters can't include > 1 chromosome
''' update statistics '''
positions = []
positions += [pos for r in self.reads for pos in r.read.positions]
self.reads.sort()
        self.start = min(positions)
        self.end = max(positions)
self.median = int(np.median(positions))
def readgroups(self):
c = Counter([r.getRG() for r in self.reads])
return [str(k[0]) + '|' + str(k[1]) for k in zip(c.keys(), c.values())]
def bamfiles(self):
c = Counter([r.bamfn for r in self.reads])
return [str(k[0]) + '|' + str(k[1]) for k in zip(c.keys(), c.values())]
def find_extrema(self):
''' return leftmost and rightmost aligned positions in cluster vs. reference '''
positions = []
positions += [pos for r in self.reads for pos in r.read.positions]
return min(positions), max(positions)
def avg_matchpct(self):
return np.mean([read_matchpct(r.read) for r in self.reads])
def __len__(self):
return len(self.reads)
class SplitCluster(ReadCluster):
''' store and manipulate groups of SplitRead objects '''
def add_splitread(self, sr):
''' add a SplitRead and update '''
self.reads.append(sr)
if self.chrom is None: self.chrom = sr.chrom
assert self.chrom == sr.chrom # clusters can't include > 1 chromosome
''' update statistics '''
self.reads.sort()
self.start = self.reads[0].breakpos
self.end = self.reads[-1].breakpos
self.median = self.reads[len(self)/2].breakpos
def subcluster_by_breakend(self, breakends, direction='both'):
''' return a new cluster containing only reads with breakpoints in passed list '''
new = SplitCluster()
assert direction in ('both', 'left', 'right')
if direction == 'both':
[new.add_splitread(sr) for sr in self.reads if sr.breakpos in breakends]
if direction == 'left':
[new.add_splitread(sr) for sr in self.reads if sr.breakpos in breakends and sr.breakleft]
if direction == 'right':
[new.add_splitread(sr) for sr in self.reads if sr.breakpos in breakends and sr.breakright]
return new
def consensus(self, minscore = 0.9):
''' build consensus from sorted aligned reads iteratively '''
S = -np.ones((256, 256)) + 2 * np.identity(256)
S = S.astype(np.int16)
sortable_reads = [SortableRead(sr.read) for sr in self.reads]
seqs = [sorted_read.seq for sorted_read in sorted(sortable_reads)]
cons = seqs[0]
scores = []
for seq in seqs[1:]:
s1 = align.string_to_alignment(cons)
s2 = align.string_to_alignment(seq)
(s, a1, a2) = align.align(s1, s2, -2, -2, S, local=True)
a1 = align.alignment_to_string(a1)
a2 = ''.join([b for b in list(align.alignment_to_string(a2)) if b != '-'])
score = float(len(a1) - (len(a1)-s)) / float(len(a1))
scores.append(score)
# print('%s\n%s\nScore: %f' % (a1, a2, score))
if score < minscore and len(cons) == len(seq):
cons = seq
if score >= minscore:
align_end = locate_subseq(seq, a2)[1]
cons += seq[align_end:]
        return cons, np.mean(scores)
def all_breakpoints(self):
''' returns uniquified list of breakpoints '''
return list(set([read.breakpos for read in self.reads]))
def median_D(self):
return np.median([splitqual(sr.read) for sr in self.reads])
def min_cliplen(self):
return min([sr.cliplen for sr in self.reads])
def max_cliplen(self):
return max([sr.cliplen for sr in self.reads])
def __str__(self):
break_count = Counter([read.breakpos for read in self.reads])
return '\t'.join(map(str, ('SplitCluster:', self.chrom, self.start, self.end, len(self.reads), break_count)))
class DiscoCluster(ReadCluster):
''' store and manipulate groups of DiscoRead objects '''
def overlap_insertion(self, insertion):
iv_ins = [insertion.min_supporting_base(), insertion.max_supporting_base()]
iv_dsc = self.find_extrema()
return min(iv_ins[1], iv_dsc[1]) - max(iv_ins[0], iv_dsc[0]) > 0
def find_mate_extrema(self):
        ''' return leftmost and rightmost aligned positions of mates vs. reference '''
positions = []
for r in self.reads:
if r.mate_mapped():
positions += [pos for pos in r.mate_read.positions]
if len(positions) == 0: return -1, -1
return min(positions), max(positions)
def summary_tuple(self):
return (self.reads[0].mate_chrom, self.find_mate_extrema()[0], self.find_mate_extrema()[1], self.readgroups(), self.bamfiles())
class BreakEnd:
    ''' collate information about a breakend '''
def __init__(self, chrom, breakpos, cluster, consensus, score, direction):
self.uuid = str(uuid4())
self.cluster = cluster
self.chrom = chrom
self.breakpos = breakpos
self.consensus = consensus
self.consscore = score
self.direction = direction
self.mappings = []
def microhomology(self):
pass # placeholder
def proximal_subread(self):
''' return mapping(s) containing breakpoint '''
return [read for read in self.mappings if self.breakpos in read.get_reference_positions()]
def distal_subread(self):
''' return mapping(s) not containing breakpoint '''
return [read for read in self.mappings if not read.is_unmapped and self.breakpos not in read.get_reference_positions()]
def unmapped_subread(self):
''' returns list of intervals and corresponding subseqs '''
covered_bases = []
for subseq in map(lambda x : x.query_alignment_sequence, self.mappings):
subseq = orient_subseq(self.consensus, subseq)
covered_bases += range(*locate_subseq(self.consensus, subseq))
subseqs = []
intervals = []
interval = []
subseq = []
for p, b in enumerate(list(self.consensus)):
if p not in covered_bases:
if len(interval) > 0 and interval[-1]+1 < p:
intervals.append( (min(interval), max(interval)) )
subseqs.append(''.join(subseq))
interval = []
subseq = []
else:
interval.append(p)
subseq.append(b)
if len(interval) > 0:
intervals.append( (min(interval), max(interval)) )
subseqs.append(''.join(subseq))
return intervals, subseqs
def __len__(self):
return len(self.cluster)
def __str__(self):
return '%s:%d:%s:%s' % (self.chrom, self.breakpos, self.consensus, self.direction)
class Insertion:
''' store and compile information about an insertion with 1 or 2 breakpoints '''
def __init__(self, be1=None, be2=None):
self.uuid = str(uuid4())
self.be1 = None
self.be2 = None
if be1 is not None:
self.be1 = be1
if be2 is not None:
self.be2 = be2
self.be1_alt = None
self.be2_alt = None
self.be1_improved_cons = False
self.be2_improved_cons = False
if self.paired():
if self.be1.breakpos > self.be2.breakpos:
self.be1, self.be2 = self.be2, self.be1 # keep breakends in position order
self.info = od() # set with self.compile_info()
self.discoreads = []
#self.dr_clusters = []
self.fastqrecs = []
self.mappability = None
def __len__(self):
''' return total number of reads (sr+dr) associated with insertion '''
return self.num_sr() + len(self.discoreads)
def num_sr(self):
''' return number of split reads supporting insertion '''
l = 0
if self.be1 is not None: l += len(self.be1.cluster)
if self.be2 is not None: l += len(self.be2.cluster)
return l
def paired(self):
return None not in (self.be1, self.be2)
def breakend_overlap(self):
if not self.paired(): return None
if len(self.be1.proximal_subread()) == 0 or len(self.be2.proximal_subread()) == 0: return None
return ref_dist(self.be1.proximal_subread()[0], self.be2.proximal_subread()[0])
def min_supporting_base(self):
''' return leftmost supporting reference position covered '''
sites = []
for be in (self.be1, self.be2):
if be is not None:
if len(be.proximal_subread()) > 0:
for proxread in be.proximal_subread():
sites += proxread.get_reference_positions()
if len(sites) == 0:
return None
return min(sites)
def max_supporting_base(self):
''' return rightmost supporting reference position covered '''
sites = []
for be in (self.be1, self.be2):
if be is not None:
if len(be.proximal_subread()) > 0:
for proxread in be.proximal_subread():
sites += proxread.get_reference_positions()
if len(sites) == 0:
return None
return max(sites)
def tsd(self, be1_use_prox=0, be2_use_prox=0):
''' target site duplication '''
if not self.paired():
return None
if self.breakend_overlap() > 0:
return None
else:
if len(self.be1.proximal_subread()) == 0 or len(self.be2.proximal_subread()) == 0: return None
junc1 = self.be1.proximal_subread()[be1_use_prox]
junc2 = self.be2.proximal_subread()[be2_use_prox]
tsd_ref_interval = ref_overlap(junc1, junc2)
if tsd_ref_interval is None: return None
tsd_ref_interval[1] += 1
tsdseq1 = ''
tsdseq2 = ''
#for (qrypos, refpos) in junc1.get_aligned_pairs(): # broken by pysam 8.3
for qrypos, refpos in enumerate(junc1.get_reference_positions()):
if refpos in range(*tsd_ref_interval):
if qrypos is not None:
tsdseq1 += junc1.seq[qrypos+junc1.qstart]
#for (qrypos, refpos) in junc2.get_aligned_pairs(): # broken by pysam 8.3
for qrypos, refpos in enumerate(junc2.get_reference_positions()):
if refpos in range(*tsd_ref_interval):
if qrypos is not None:
tsdseq2 += junc2.seq[qrypos+junc2.qstart]
return tsdseq1, tsdseq2
def fetch_discordant_reads(self, bams, isize=10000):
''' Return list of DiscoRead objects '''
chrom = self.be1.chrom
start = self.min_supporting_base()
end = self.max_supporting_base()
if None in (start, end): return []
assert start < end, 'fetch_discordant_reads: start > end'
mapped = {}
unmapped = {}
for bam in bams:
for read in bam.fetch(chrom, start, end):
if read.is_paired and not read.is_unmapped and not read.is_duplicate:
chrom = str(bam.getrname(read.tid))
if read.mate_is_unmapped:
unmapped[read.qname] = DiscoRead(chrom, read, bam.filename)
else:
pair_dist = abs(read.reference_start - read.next_reference_start)
if read.tid != read.next_reference_id or pair_dist > isize:
mate_chrom = str(bam.getrname(read.next_reference_id))
mapped[read.qname] = DiscoRead(chrom, read, bam.filename, mate_chrom)
# get mate info
# mate mapped
for qname, dr in mapped.iteritems():
for read in bam.fetch(dr.mate_chrom, dr.read.next_reference_start, dr.read.next_reference_start+1):
if read.qname == qname and not read.is_secondary and not is_supplementary(read):
if read.seq != mapped[qname].read.seq:
mapped[qname].mate_read = read
# mate unmapped
for read in bam.fetch(chrom, start, end):
if read.is_unmapped and read.qname in unmapped:
if not read.is_secondary and not is_supplementary(read):
unmapped[read.qname].mate_read = read
self.discoreads = mapped.values() + unmapped.values()
def align_filter(self, insref_fa, tmpdir='/tmp'):
        ''' return True (flag for exclusion) if the insertion consensus fails to align to the insertion reference library '''
if not os.path.exists(insref_fa + '.tis'): build_last_db(insref_fa)
out_fa = '%s/tebreak.align_filter.%s.fa' % (tmpdir, str(uuid4()))
d1, d2, u1, u2 = (None, None, None, None)
if self.be1 is not None:
d1 = self.be1.distal_subread() # pysam.AlignedSegment or None
u1 = self.be1.unmapped_subread()[1] # seq strings or None
if d1 is not None: d1 = map(lambda x: x.seq, d1)
if self.be2 is not None:
d2 = self.be2.distal_subread()
u2 = self.be2.unmapped_subread()[1]
if d2 is not None: d2 = map(lambda x: x.seq, d2)
seqsizes = [20]
with open(out_fa, 'w') as fa:
for seqlist in (d1, d2, u1, u2):
if seqlist is not None:
for seq in seqlist:
if len(seq) >= 10:
seqsizes.append(len(seq))
fa.write('>%s\n%s\n' % (str(uuid4()), seq))
la_results = align_last(out_fa, insref_fa, e=min(seqsizes)-2)
os.remove(out_fa)
if len(la_results) == 0: return True
return False
def improve_consensus(self, ctg_fa, bwaref, tmpdir='/tmp'):
''' attempt to use assembly of all supporting reads to build a better consensus '''
cons_fasta = self.consensus_fasta(tmpdir)
ctg_falib = load_falib(ctg_fa)
build_last_db(ctg_fa)
la_results = align_last(cons_fasta, ctg_fa, e=20)
if self.be1 is not None:
# find corresponding contig, if possible
for res in la_results:
if res.query_id == self.be1.uuid:
# criteria for deciding the new consensus is better
if res.target_seqsize > len(self.be1.consensus) and len(self.be1.consensus) - res.query_alnsize < 10 and res.pct_match() > 0.95:
self.be1_alt = self.be1
self.be1.consensus = ctg_falib[res.target_id]
self.be1_improved_cons = True
if self.be2 is not None:
# find corresponding contig, if possible
for res in la_results:
if res.query_id == self.be2.uuid:
# criteria for deciding the new consensus is better
if res.target_seqsize > len(self.be2.consensus) and len(self.be2.consensus) - res.query_alnsize < 10 and res.pct_match() > 0.95:
self.be2_alt = self.be2
self.be2.consensus = ctg_falib[res.target_id]
self.be2_improved_cons = True
for ext in ('','.amb','.ann','.bck','.bwt','.des','.fai','.pac','.prj','.sa','.sds','.ssp','.suf','.tis'):
if os.path.exists(ctg_fa+ext): os.remove(ctg_fa+ext)
if os.path.exists(cons_fasta): os.remove(cons_fasta)
return self.be1_improved_cons, self.be2_improved_cons
def supportreads_fastq(self, outdir, min_readlen=50, limit=1000):
''' discordant support reads marked DR, split support reads marked SR '''
assert os.path.exists(outdir)
outreads = od()
usedreads = {}
out_fastq = outdir + '/' + '.'.join(('supportreads', self.be1.chrom, str(self.be1.breakpos), 'fq'))
with open(out_fastq, 'w') as out:
for readstore in (self.be1, self.be2, self.discoreads):
if readstore:
try:
rtype = 'SR'
readlist = readstore.cluster.reads
except:
rtype = 'DR'
readlist = readstore
for r in readlist:
read = r.read
name = read.qname
unseen = True
if read.is_read1:
if name + '/1' in usedreads: unseen = False
usedreads[name + '/1'] = True
name += '.%s/1' % rtype
if read.is_read2:
if name + '/2' in usedreads: unseen = False
usedreads[name + '/2'] = True
name += '.%s/2' % rtype
if len(read.seq) > min_readlen and unseen: outreads[name] = read.seq + '\n+\n' + read.qual
if rtype == 'DR' and r.mate_read is not None: # get discordant mates
read = r.mate_read
unseen = True
if read.is_read1:
if name + '/1' in usedreads: unseen = False
usedreads[name + '/1'] = True
name += '.%s/1' % rtype
if read.is_read2:
if name + '/2' in usedreads: unseen = False
usedreads[name + '/2'] = True
name += '.%s/2' % rtype
if len(read.seq) > min_readlen and unseen: outreads[name] = read.seq + '\n+\n' + read.qual
if len(outreads) >= limit or len(outreads) == 0: return None
for name, data in outreads.iteritems():
out.write('@%s\n%s\n' % (name, data))
self.fastqrecs.append('@%s\n%s\n' % (name, data))
return out_fastq
def consensus_fasta(self, tmpdir='/tmp'):
assert os.path.exists(tmpdir)
out_fasta = tmpdir + '/' + '.'.join(('consensus', self.be1.chrom, str(self.be1.breakpos), str(uuid4()), 'fa'))
with open(out_fasta, 'w') as out:
out.write('>%s\n%s\n' % (self.be1.uuid, self.be1.consensus))
if self.be2 is not None:
out.write('>%s\n%s\n' % (self.be2.uuid, self.be2.consensus))
return out_fasta
def compile_info(self, bams):
''' fill self.info with summary info, needs original bam for chromosome lookup '''
        if self.be1 is None and self.be2 is None:
return None
self.info['ins_uuid'] = self.uuid
self.info['chrom'] = self.be1.chrom
self.info['min_supporting_base'] = self.min_supporting_base()
self.info['max_supporting_base'] = self.max_supporting_base()
self.info['mappability'] = self.mappability
self.info['be1_breakpos'] = self.be1.breakpos
self.info['be1_obj_uuid'] = self.be1.uuid
#seqs
self.info['be1_cons_seq'] = self.be1.consensus
self.info['be1_prox_seq'] = ','.join(map(lambda x : x.query_alignment_sequence, self.be1.proximal_subread()))
self.info['be1_dist_seq'] = ','.join(map(lambda x : x.query_alignment_sequence, self.be1.distal_subread()))
self.info['be1_umap_seq'] = ','.join(self.be1.unmapped_subread()[1])
if self.be1.proximal_subread() and self.be1.proximal_subread()[0].is_reverse:
self.info['be1_prox_str'] = '-'
else:
self.info['be1_prox_str'] = '+'
self.info['be1_prox_loc'] = []
for subseq in self.info['be1_prox_seq'].split(','):
self.info['be1_prox_loc'].append(locate_subseq(self.be1.consensus, orient_subseq(self.be1.consensus, subseq)))
# stats
self.info['be1_sr_count'] = len(self.be1)
self.info['be1_num_maps'] = len(self.be1.mappings)
self.info['be1_cons_scr'] = self.be1.consscore
self.info['be1_median_D'] = self.be1.cluster.median_D()
self.info['be1_avgmatch'] = self.be1.cluster.avg_matchpct()
self.info['be1_rg_count'] = self.be1.cluster.readgroups()
self.info['be1_bf_count'] = self.be1.cluster.bamfiles()
self.info['be1_prox_mpq'] = ','.join(map(lambda x : str(x.mapq), self.be1.proximal_subread()))
self.info['be1_improved'] = self.be1_improved_cons
if self.info['be1_dist_seq'] == '':
self.info['be1_dist_seq'] = None
else:
self.info['be1_dist_chr'] = ','.join(map(lambda x : bams[0].getrname(x.tid), self.be1.distal_subread()))
self.info['be1_dist_pos'] = ','.join(map(lambda x : str(x.get_reference_positions()[0]), self.be1.distal_subread()))
self.info['be1_dist_end'] = ','.join(map(lambda x : str(x.get_reference_positions()[-1]), self.be1.distal_subread()))
self.info['be1_dist_mpq'] = ','.join(map(lambda x : str(x.mapq), self.be1.distal_subread()))
if self.info['be1_umap_seq'] == '':
self.info['be1_umap_seq'] = None
if self.be2 is not None:
self.info['be2_breakpos'] = self.be2.breakpos
self.info['be2_obj_uuid'] = self.be2.uuid
self.info['be2_cons_seq'] = self.be2.consensus
self.info['be2_prox_seq'] = ','.join(map(lambda x : x.query_alignment_sequence, self.be2.proximal_subread()))
self.info['be2_dist_seq'] = ','.join(map(lambda x : x.query_alignment_sequence, self.be2.distal_subread()))
self.info['be2_umap_seq'] = ','.join(self.be2.unmapped_subread()[1])
if self.be2.proximal_subread() and self.be2.proximal_subread()[0].is_reverse:
self.info['be2_prox_str'] = '-'
else:
self.info['be2_prox_str'] = '+'
self.info['be2_prox_loc'] = []
for subseq in self.info['be2_prox_seq'].split(','):
self.info['be2_prox_loc'].append(locate_subseq(self.be2.consensus, orient_subseq(self.be2.consensus, subseq)))
# stats
self.info['be2_sr_count'] = len(self.be2)
self.info['be2_num_maps'] = len(self.be2.mappings)
self.info['be2_cons_scr'] = self.be2.consscore
self.info['be2_median_D'] = self.be2.cluster.median_D()
self.info['be2_avgmatch'] = self.be2.cluster.avg_matchpct()
self.info['be2_rg_count'] = self.be2.cluster.readgroups()
self.info['be2_bf_count'] = self.be2.cluster.bamfiles()
self.info['be2_prox_mpq'] = ','.join(map(lambda x : str(x.mapq), self.be2.proximal_subread()))
self.info['be2_improved'] = self.be2_improved_cons
if self.info['be2_dist_seq'] == '':
self.info['be2_dist_seq'] = None
else:
self.info['be2_dist_chr'] = ','.join(map(lambda x: bams[0].getrname(x.tid), self.be2.distal_subread()))
self.info['be2_dist_pos'] = ','.join(map(lambda x: str(x.get_reference_positions()[0]), self.be2.distal_subread()))
self.info['be2_dist_end'] = ','.join(map(lambda x: str(x.get_reference_positions()[-1]), self.be2.distal_subread()))
self.info['be2_dist_mpq'] = ','.join(map(lambda x : str(x.mapq), self.be2.distal_subread()))
if self.info['be2_umap_seq'] == '':
self.info['be2_umap_seq'] = None
self.info['be1_use_prox'] = 0
self.info['be2_use_prox'] = 0
if self.info['be1_prox_loc'] == self.info['be2_prox_loc']: # insertion may be completely assembled
if len(self.info['be1_prox_loc']) > 1:
self.info['be1_use_prox'] = 1
elif len(self.info['be2_prox_loc']) > 1:
self.info['be2_use_prox'] = 1
tsdpair = self.tsd(be1_use_prox=self.info['be1_use_prox'], be2_use_prox=self.info['be2_use_prox'])
if tsdpair is not None:
self.info['be1_end_over'], self.info['be2_end_over'] = tsdpair
if 'be2_breakpos' not in self.info:
self.info['be2_breakpos'] = self.info['be1_breakpos']
if 'be2_sr_count' not in self.info:
self.info['be2_sr_count'] = 0
self.info['dr_count'] = len(self.discoreads)
self.info['dr_unmapped_mates'] = len([dr for dr in self.discoreads if dr.mate_read is not None and dr.mate_read.is_unmapped])
#######################################
## Functions ##
#######################################
def rc(dna):
''' reverse complement '''
complements = maketrans('acgtrymkbdhvACGTRYMKBDHV', 'tgcayrkmvhdbTGCAYRKMVHDB')
return dna.translate(complements)[::-1]
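# Worked example: rc('AACGT') == 'ACGTT'; IUPAC ambiguity codes (r/y/m/k/b/d/h/v)
# are complemented too, which matters for reference sequences containing them.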
def read_matchpct(read):
''' return number of mismatches / aligned length of read '''
nm = [value for (tag, value) in read.tags if tag == 'NM'][0]
return 1.0 - (float(nm)/float(read.alen))
def ref_overlap(read1, read2):
''' return overlapping interval in ref. coords (not chrom), none otherwise '''
if read1 is None or read2 is None:
return None
if read1.is_unmapped or read2.is_unmapped:
return None
iv1 = sorted((read1.get_reference_positions()[0], read1.get_reference_positions()[-1]))
iv2 = sorted((read2.get_reference_positions()[0], read2.get_reference_positions()[-1]))
if min(iv1[1], iv2[1]) - max(iv1[0], iv2[0]) > 0: # is there overlap?
return [max(iv1[0], iv2[0]), min(iv1[1], iv2[1])]
return None
def ref_dist(read1, read2):
''' return distance between intervals in ref., overlapping = negative values '''
if read1 is None or read2 is None:
return None
iv1 = sorted((read1.get_reference_positions()[0], read1.get_reference_positions()[-1]))
iv2 = sorted((read2.get_reference_positions()[0], read2.get_reference_positions()[-1]))
return max(iv1[0], iv2[0]) - min(iv1[1], iv2[1])
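# Worked example: reads spanning reference positions 100-150 and 140-200 give
# ref_dist = max(100, 140) - min(150, 200) = -10 (10 bp overlap, a candidate TSD),
# while 100-150 vs. 160-200 gives 160 - 150 = 10 (a 10 bp gap).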
def orient_subseq(longseq, shortseq):
''' return shortseq in same orientation as longseq '''
assert len(longseq) >= len(shortseq), 'orient_subseq: %s < %s' % (longseq, shortseq)
if re.search(shortseq, longseq):
return shortseq
else:
assert re.search(rc(shortseq), longseq), "orient_subseq: %s not a subseq of %s" %(shortseq, longseq)
return rc(shortseq)
def locate_subseq(longseq, shortseq):
''' return (start, end) of shortseq in longseq '''
assert len(longseq) >= len(shortseq), 'orient_subseq: %s < %s' % (longseq, shortseq)
match = re.search(shortseq, longseq)
if match is not None:
return sorted((match.start(0), match.end(0)))
return None
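# e.g. locate_subseq('AACGTAC', 'CGT') returns [2, 5]: 0-based, end-exclusive,
# following re.Match start/end conventions.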
def is_primary(read):
''' not included in pysam '''
return not (read.is_secondary or is_supplementary(read))
def is_supplementary(read):
''' pysam does not currently include a check for this flag '''
    return bool(read.flag & 2048)
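# SAM flag 0x800 (2048) marks supplementary alignments, i.e. the non-primary
# pieces of a chimeric/split alignment; newer pysam exposes read.is_supplementary.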
def fetch_clipped_reads(bams, chrom, start, end, filters):
''' Return list of SplitRead objects '''
assert filters['min_minclip'] > 2
splitreads = []
start = int(start)
end = int(end)
assert start < end
for bam in bams:
for read in bam.fetch(chrom, start, end):
masked = False
if filters['genome_mask'] is not None and chrom in filters['genome_mask']:
if filters['genome_mask'][chrom].find(read.pos, read.pos+1): masked = True
if not masked and not read.is_unmapped and not read.is_duplicate: #and read.mapq > 0:
if read.rlen - read.alen >= int(filters['min_minclip']): # 'soft' clipped?
# length of 'minor' clip
altclip = min(read.qstart, read.rlen-read.qend)
# junk bases
N_count = 0
if 'N' in read.seq: N_count = Counter(read.seq)['N']
if altclip <= 2: # could add as a filter
if N_count <= filters['max_N_consensus'] and splitqual(read) <= filters['max_D_score']:
chrom = str(bam.getrname(read.tid))
if len(read.get_reference_positions()) > 0:
splitreads.append(SplitRead(chrom, read, bam.filename))
return splitreads
def splitqual(read):
''' return KS-test D value for clipped vs. unclipped bases'''
    breakpos = read.get_aligned_pairs()[-1][0] # query position of the rightmost aligned base
q1 = map(ord, list(read.qual[:breakpos]))
q2 = map(ord, list(read.qual[breakpos:]))
return ss.ks_2samp(q1, q2)[0]
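# D is the maximum distance between the ECDFs of clipped vs. aligned base
# qualities: D near 0 means the clipped tail has quality similar to the aligned
# portion (consistent with a real breakpoint); D near 1 suggests a low-quality
# artifact. Minimal sketch with illustrative values:
#   ss.ks_2samp([30] * 20, [30] * 20)[0]  # 0.0: indistinguishable distributions
#   ss.ks_2samp([30] * 20, [2] * 20)[0]   # 1.0: completely separated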
def load_falib(infa):
seqdict = {}
with open(infa, 'r') as fa:
seqid = ''
seq = ''
for line in fa:
if line.startswith('>'):
if seq != '':
seqdict[seqid] = seq
seqid = line.lstrip('>').strip().split()[0]
seq = ''
else:
assert seqid != ''
seq = seq + line.strip()
if seqid not in seqdict and seq != '':
seqdict[seqid] = seq
return seqdict
def build_sr_clusters(splitreads, searchdist=100): # TODO PARAM,
''' cluster SplitRead objects into Cluster objects and return a list of them '''
clusters = []
for sr in splitreads:
if len(clusters) == 0:
clusters.append(SplitCluster(sr))
elif clusters[-1].chrom != sr.chrom:
clusters.append(SplitCluster(sr))
else:
if abs(clusters[-1].median - sr.breakpos) > searchdist:
clusters.append(SplitCluster(sr))
else:
clusters[-1].add_splitread(sr)
return clusters
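# Clustering assumes the input is sorted (see SplitRead.__gt__ and the sr.sort()
# call in run_chunk): a read joins the most recent cluster when it shares a
# chromosome and falls within searchdist of the running cluster median,
# otherwise it seeds a new cluster.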
def build_breakends(cluster, filters, tmpdir='/tmp'):
''' returns list of breakends from cluster '''
breakends = []
for breakpos in cluster.all_breakpoints():
for dir in ('left', 'right'):
subcluster = cluster.subcluster_by_breakend([breakpos], direction=dir)
if len(subcluster) >= filters['min_sr_per_break'] and subcluster.max_cliplen() >= filters['min_maxclip']:
seq = subcluster.reads[0].read.seq
score = 1.0
if len(subcluster) > 1: seq, score = subcluster.consensus()
N_count = 0
if 'N' in seq: N_count = Counter(seq)['N']
if seq != '' and score >= filters['min_consensus_score'] and N_count <= filters['max_N_consensus']:
breakends.append(BreakEnd(cluster.chrom, breakpos, subcluster, seq, score, dir))
return breakends
def map_breakends(breakends, db, tmpdir='/tmp'):
''' remap consensus sequences stored in BreakEnd objects '''
tmp_fa = tmpdir + '/' + '.'.join(('tebreak', str(uuid4()), 'be.fa'))
breakdict = {} # for faster lookup
with open(tmp_fa, 'w') as out:
for be in breakends:
be.mappings = []
breakdict[be.uuid] = be
qual = 'I' * len(be.consensus)
out.write('>%s\n%s\n+\n%s\n' % (be.uuid, be.consensus, qual))
tmp_sam = '.'.join(tmp_fa.split('.')[:-1]) + '.sam'
FNULL = open(os.devnull, 'w')
with open(tmp_sam, 'w') as out:
sam_cmd = ['bwa', 'mem', '-k', '10', '-w', '500', '-M', '-v', '0', db, tmp_fa]
p = subprocess.Popen(sam_cmd, stdout=subprocess.PIPE, stderr=FNULL)
for line in p.stdout:
out.write(line)
sam = pysam.AlignmentFile(tmp_sam)
passed_parse = False
# rarely, pysam will fail to parse the bwa-mem generated SAM and I haven't worked out why... workaround for now
while not passed_parse:
try:
for i, read in enumerate(sam.fetch(until_eof=True)):
breakdict[read.qname].mappings.append(read)
passed_parse = True
except IOError as e:
sys.stderr.write("warning: pysam failed parse at read %d, modifying file and re-try...\n" % i)
sam.close()
lines = []
with open(tmp_sam, 'r') as sam:
lines = [line for line in sam]
with open(tmp_sam, 'w') as sam:
for n, line in enumerate(lines):
if line.startswith('@'): n -= 1 # header
if n < i: sam.write(line)
sam = pysam.AlignmentFile(tmp_sam)
sam.close()
os.remove(tmp_fa)
os.remove(tmp_sam)
return breakdict.values()
def build_last_db(fa):
''' make db for LAST alignments '''
subprocess.call(['lastdb', '-s', '4G', fa, fa])
    assert os.path.exists(fa + '.tis'), 'could not build LAST db: lastdb -s 4G %s %s' % (fa, fa)
def align_last(fa, db, e=20):
''' returns list of LASTResult objects '''
assert os.path.exists(db + '.tis'), 'not a lastdb: %s' % db
last_cmd = ['lastal', '-e', str(e), db, fa]
la_lines = []
la_results = []
p = subprocess.Popen(last_cmd, stdout=subprocess.PIPE)
for line in p.stdout:
if not line.startswith('#'):
if line.strip() != '':
la_lines.append(line.strip())
else:
la_results.append(LASTResult(la_lines))
la_lines = []
return la_results
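# align_last shells out to `lastal -e <minscore> db query.fa` and splits the
# MAF output on blank lines; each 'a'/'s' block becomes one LASTResult (see the
# example near the LASTResult class above).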
def score_breakend_pair(be1, be2, k=2.5, s=3.0):
''' assign a score to a breakend, higher is "better" '''
prox1 = be1.proximal_subread()
prox2 = be2.proximal_subread()
if prox1 and prox2:
prox1 = prox1[0]
prox2 = prox2[0]
overlap = abs(min(0, ref_dist(prox1, prox2))) # overlap = negative distance between proximal read mappings i.e. potential TSD
weighted_overlap = ss.gamma(k, scale=s).pdf(overlap) * float(overlap)**2 # TSD length distribution taken into account
distance_penalty = 0
if overlap > 0: distance_penalty = abs(abs(be1.breakpos-be2.breakpos) - overlap) # disagreement in TSD length
if overlap == 0: distance_penalty = abs(be1.breakpos-be2.breakpos) # no TSD
score = weighted_overlap - distance_penalty + len(be1) + len(be2)
return score
return None
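# Worked example with the defaults k=2.5, s=3.0: a 6 bp proximal overlap (a
# plausible TSD length) gives ss.gamma(2.5, scale=3.0).pdf(6) ~ 0.096, so
# weighted_overlap ~ 0.096 * 36 ~ 3.5; if the breakends are also 6 bp apart the
# distance penalty is 0 and two 5-read clusters score ~ 3.5 + 5 + 5 = 13.5.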
def checkref(ref_fasta):
assert os.path.exists(ref_fasta), 'reference not found: %s' % ref_fasta
assert os.path.exists(ref_fasta + '.fai'), 'please run samtools faidx on %s' % ref_fasta
assert os.path.exists(ref_fasta + '.bwt'), 'please run bwa index on %s' % ref_fasta
def build_insertions(breakends, maxdist=100):
''' return list of Insertion objects '''
insertions = []
be_dict = dict([(be.uuid, be) for be in breakends])
be_itree = Intersecter() # interval tree
for be in breakends:
be_itree.add_interval(Interval(be.breakpos-maxdist, be.breakpos+maxdist, value=be.uuid))
pair_scores = []
checked_pairs = {}
for be1 in breakends:
for be2_coords in be_itree.find(be1.breakpos, be1.breakpos+1):
be2 = be_dict[be2_coords.value]
pair_name = '-'.join(sorted((be1.uuid, be2.uuid)))
if be1.uuid != be2.uuid and be1.direction != be2.direction and pair_name not in checked_pairs:
if abs(be1.breakpos-be2.breakpos) <= maxdist:
pair_scores.append((be1.uuid, be2.uuid, score_breakend_pair(be1, be2)))
checked_pairs[pair_name] = True
# sort breakends by score, descending
pair_scores = [score for score in pair_scores if score[2] is not None]
pair_scores.sort(key=itemgetter(2),reverse=True)
used = {} # each breakend can only be used once
for be1_uuid, be2_uuid, score in pair_scores:
if be1_uuid not in used and be2_uuid not in used:
insertions.append(Insertion(be_dict[be1_uuid], be_dict[be2_uuid]))
#print "debug:, insertion: %s and %s" % (be_dict[be1_uuid], be_dict[be2_uuid])
used[be1_uuid] = True
used[be2_uuid] = True
# single-end detections
for be in breakends:
if be.uuid not in used:
insertions.append(Insertion(be))
used[be.uuid] = True
return insertions
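# Pairing strategy: opposite-direction breakends within maxdist are scored via
# score_breakend_pair, taken greedily from the best score down (each breakend
# used at most once), and any breakend left unpaired becomes a single-ended
# Insertion.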
def minia(fq, tmpdir='/tmp'):
''' sequence assembly '''
    # minia temp files don't seem to be compatible with concurrency, workaround w/ temp cwd
oldcwd = os.getcwd()
tmpcwd = '%s/%s' % (tmpdir, 'tebreak.'+str(uuid4()))
os.mkdir(tmpcwd)
assert os.path.exists(tmpcwd), 'cannot create temp dir: %s' % tmpcwd
os.chdir(tmpcwd)
if not os.path.exists(fq):
fq = oldcwd + '/' + fq
ctgbase = tmpdir + '/tebreak.minia.%s' % str(uuid4())
cmd = ['minia', '-in', fq, '-abundance-min', '1', '-no-length-cutoff', '-verbose', '0', '-nb-cores', '1', '-out', ctgbase]
FNULL = open(os.devnull, 'w')
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=FNULL)
for line in p.stdout: pass
if os.path.exists(ctgbase + '.h5'): os.remove(ctgbase + '.h5')
os.chdir(oldcwd)
os.rmdir(tmpcwd)
return ctgbase + '.contigs.fa'
def build_mask(bedfile):
''' return a dictionary of interval trees '''
forest = dd(Intersecter)
with open(bedfile, 'r') as bed:
for line in bed:
chrom, start, end = line.strip().split()[:3]
start = int(start)
end = int(end)
forest[chrom].add_interval(Interval(start, end))
return forest
def avgmap(maptabix, chrom, start, end):
''' return average mappability across chrom:start-end region; maptabix = pysam.Tabixfile'''
scores = []
if None in (start, end): return None
if chrom in maptabix.contigs:
for rec in maptabix.fetch(chrom, int(start), int(end)):
mchrom, mstart, mend, mscore = rec.strip().split()
mstart, mend = int(mstart), int(mend)
mscore = float(mscore)
            while mstart < mend:
                mstart += 1
                if int(start) <= mstart <= int(end):
                    scores.append(mscore)
if len(scores) > 0:
return sum(scores) / float(len(scores))
else:
return 0.0
else:
return 0.0
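# Usage sketch (hypothetical file), assuming a tabix-indexed BED with a
# mappability score in column 4:
#   maptabix = pysam.Tabixfile('mappability.bed.gz')
#   avgmap(maptabix, 'chr1', 10000, 11000)  # per-base mean score in the window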
def summarise_insertion(ins):
''' returns a pickleable version of the insertion information '''
pi = dd(dict)
pi['INFO'] = ins.info
pi['READSTORE'] = ins.fastqrecs
return pi
def filter_insertions(insertions, filters, tmpdir='/tmp'):
filtered = []
for ins in insertions:
exclude = False
        mapq = [x.mapq for x in ins.be1.proximal_subread()]
rgs = [rg.split('|')[0] for rg in ins.be1.cluster.readgroups()]
bams = [bam.split('|')[0] for bam in ins.be1.cluster.bamfiles()]
if ins.be2 is not None and len(ins.be2.proximal_subread()) > 0:
            mapq += [x.mapq for x in ins.be2.proximal_subread()]
rgs += [rg.split('|')[0] for rg in ins.be2.cluster.readgroups()]
bams += [bam.split('|')[0] for bam in ins.be2.cluster.bamfiles()]
bams = list(set(bams)) # uniqify
if len(ins) >= filters['max_ins_reads']: exclude = True
if max(mapq) < filters['min_prox_mapq']: exclude = True
if ins.num_sr() < filters['min_split_reads']: exclude = True
if filters['exclude_bam']:
for bam in bams:
if bam in filters['exclude_bam']: exclude = True
if filters['exclude_readgroup']:
for rg in rgs:
                if rg in filters['exclude_readgroup']: exclude = True
if filters['max_bam_count'] > 0:
if len(bams) > filters['max_bam_count']: exclude = True
if filters['insertion_library'] is not None and not exclude:
if ins.align_filter(filters['insertion_library'], tmpdir=tmpdir): exclude = True
if filters['map_tabix'] is not None and not exclude:
if ins.be1.chrom in filters['map_tabix'].contigs:
ins.mappability = avgmap(filters['map_tabix'], ins.be1.chrom, ins.min_supporting_base(), ins.max_supporting_base())
else:
ins.mappability = 0.0
if ins.mappability < filters['min_mappability']: exclude = True
if not exclude: filtered.append(ins)
return filtered
def postprocess_insertions(insertions, filters, bwaref, bams, tmpdir='/tmp'):
for ins in insertions:
support_fq = ins.supportreads_fastq(tmpdir, limit=filters['max_ins_reads'])
if support_fq is None: return insertions
cwd = os.getcwd()
support_asm = minia(support_fq, tmpdir=tmpdir)
retry_counter = 0 # minia might not be the most reliable option...
while not os.path.exists(support_asm) and retry_counter < 10:
retry_counter += 1
sys.stderr.write('***Assembly retry: %s:%d\n' % (ins.be1.chrom, ins.be1.breakpos))
support_asm = minia(support_fq, tmpdir=tmpdir)
if not os.path.exists(support_asm):
sys.stderr.write('***Assembly failed!: %s:%d\n' % (ins.be1.chrom, ins.be1.breakpos))
else:
#sys.stderr.write('Assembled: %s:%d, filename: %s\n' % (ins.be1.chrom, ins.be1.breakpos, support_asm))
ins.improve_consensus(support_asm, bwaref, tmpdir=tmpdir)
if os.path.exists(support_fq): os.remove(support_fq)
if os.path.exists(support_asm): os.remove(support_asm)
if os.getcwd() != cwd: os.chdir(cwd)
# collect altered breakends
alt_be_list = []
for ins in insertions:
if ins.be1_improved_cons: alt_be_list.append(ins.be1)
if ins.be2_improved_cons: alt_be_list.append(ins.be2)
remap_be_dict = {}
for be in map_breakends(alt_be_list, bwaref, tmpdir=tmpdir):
remap_be_dict[be.uuid] = be
for ins in insertions:
# use previous mapping if new consensus did not map
if ins.be1_improved_cons:
if ins.be1.uuid in remap_be_dict:
if len(remap_be_dict[ins.be1.uuid].proximal_subread()) > 0:
ins.be1 = remap_be_dict[ins.be1.uuid]
else:
ins.be1 = ins.be1_alt
ins.be1_improved_cons = False
if ins.be2_improved_cons:
if ins.be2.uuid in remap_be_dict:
if len(remap_be_dict[ins.be2.uuid].proximal_subread()) > 0:
ins.be2 = remap_be_dict[ins.be2.uuid]
else:
ins.be2 = ins.be2_alt
ins.be2_improved_cons = False
if ins.be1_improved_cons or ins.be2_improved_cons:
ins.compile_info(bams)
return insertions
def run_chunk(args, exp_rpkm, chrom, start, end):
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
chunkname = '%s:%d-%d' % (chrom, start, end)
try:
bams = [pysam.AlignmentFile(bam, 'rb') for bam in args.bam.split(',')]
# would do this outside but can't pass a non-pickleable object
if args.mask is not None: args.mask = build_mask(args.mask)
if args.map_tabix is not None: args.map_tabix = pysam.Tabixfile(args.map_tabix)
start = int(start)
end = int(end)
filters = {
'min_maxclip': int(args.min_maxclip),
'min_minclip': int(args.min_minclip),
'min_sr_per_break': int(args.min_sr_per_break),
'min_consensus_score': int(args.min_consensus_score),
'max_D_score': float(args.maxD),
'max_ins_reads': int(args.max_ins_reads),
'min_split_reads': int(args.min_split_reads),
#'min_discordant_reads': int(args.min_discordant_reads),
'min_prox_mapq': int(args.min_prox_mapq),
'max_N_consensus': int(args.max_N_consensus),
'max_rpkm': int(args.max_fold_rpkm)*exp_rpkm,
'exclude_bam': [],
'exclude_readgroup': [],
'max_bam_count': int(args.max_bam_count),
'insertion_library': args.insertion_library,
'genome_mask': args.mask,
'map_tabix': args.map_tabix,
'min_mappability': float(args.min_mappability)
}
if args.exclude_bam is not None: filters['exclude_bam'] = map(os.path.basename, args.exclude_bam.split(','))
if args.exclude_readgroup is not None: filters['exclude_readgroup'] = args.exclude_readgroup.split(',')
insertions = []
logger.debug('Processing chunk: %s ...' % chunkname)
logger.debug('Chunk %s: Parsing split reads from bam(s): %s ...' % (chunkname, args.bam))
sr = fetch_clipped_reads(bams, chrom, start, end, filters)
sr.sort()
logger.debug('Chunk %s: Building clusters from %d split reads ...' % (chunkname, len(sr)))
clusters = build_sr_clusters(sr)
logger.debug('Chunk %s: Building breakends...' % chunkname)
breakends = []
for cluster in clusters:
cl_readcount = 0
cl_min, cl_max = cluster.find_extrema()
for bam in bams:
cl_readcount += sum([not read.is_unmapped for read in bam.fetch(cluster.chrom, cl_min, cl_max)])
rpkm = cl_readcount/((cl_max-cl_min)/1000.)
if filters['max_rpkm'] == 0 or rpkm < filters['max_rpkm']:
breakends += build_breakends(cluster, filters, tmpdir=args.tmpdir)
else:
logger.debug('Chunk %s, cluster %d-%d over max RPKM with %f' % (chunkname, cl_min, cl_max, rpkm))
logger.debug('Chunk %s: Mapping %d breakends ...' % (chunkname, len(breakends)))
if len(breakends) > 0:
breakends = map_breakends(breakends, args.bwaref, tmpdir=args.tmpdir)
logger.debug('Chunk %s: Building insertions...' % chunkname)
insertions = build_insertions(breakends)
insertions = [ins for ins in insertions if len(ins.be1.proximal_subread()) > 0] # remove bogus insertions
logger.debug('Chunk %s: Processing and filtering %d potential insertions ...' % (chunkname, len(insertions)))
insertions = filter_insertions(insertions, filters, tmpdir=args.tmpdir)
for ins in insertions:
ins.fetch_discordant_reads(bams)
ins.compile_info(bams)
logger.debug('Chunk %s: Postprocessing %d filtered insertions, trying to improve consensus breakend sequences ...' % (chunkname, len(insertions)))
processed_insertions = postprocess_insertions(insertions, filters, args.bwaref, bams, tmpdir=args.tmpdir)
logger.debug('Chunk %s: Summarising insertions ...' % chunkname)
summarised_insertions = [summarise_insertion(ins) for ins in processed_insertions]
logger.debug('Finished chunk: %s' % chunkname)
for bam in bams: bam.close()
return summarised_insertions
else:
for bam in bams: bam.close()
return []
except Exception, e:
sys.stderr.write('*'*60 + '\tencountered error in chunk: %s\n' % chunkname)
traceback.print_exc(file=sys.stderr)
sys.stderr.write("*"*60 + "\n")
return []
def resolve_duplicates(insertions):
    ''' resolve instances where breakpoints occur > 1x in the insertion list;
    this can happen if intervals overlap, e.g. in genome chunking '''
insdict = od() # --> index in insertions
for n, ins in enumerate(insertions):
be1 = ins['INFO']['chrom'] + ':' + str(ins['INFO']['be1_breakpos'])
be2 = ins['INFO']['chrom'] + ':' + str(ins['INFO']['be2_breakpos'])
if be1 not in insdict:
insdict[be1] = n
insdict[be2] = n
else:
if prefer_insertion(ins, insertions[insdict[be1]]):
insdict[be1] = n
insdict[be2] = n
return [insertions[n] for n in list(set(insdict.values()))]
def prefer_insertion(ins1, ins2):
''' return true if ins1 has more evidence than ins2, false otherwise '''
# prefer two-end support over one end
if ins1['INFO']['be1_breakpos'] != ins1['INFO']['be2_breakpos'] and ins2['INFO']['be1_breakpos'] == ins2['INFO']['be2_breakpos']:
return True
# prefer higher split read count
if ins1['INFO']['be1_sr_count'] + ins1['INFO']['be2_sr_count'] > ins2['INFO']['be1_sr_count'] + ins2['INFO']['be2_sr_count']:
return True
# prefer higher discordant read count
if ins1['INFO']['dr_count'] > ins2['INFO']['dr_count']:
return True
return False
def text_summary(insertions, outfile='tebreak.out'):
with open(outfile, 'w') as out:
for ins in insertions:
if ins is not None:
out.write('#BEGIN\n')
for label, value in ins['INFO'].iteritems():
out.write('%s: %s\n' % (label, str(value)))
out.write('#END\n')
out.write('\n')
def expected_rpkm(bam_files, genome, intervals=None):
''' expected reads per kilobase mapped '''
bams = [pysam.AlignmentFile(bam_file, 'rb') for bam_file in bam_files]
total_mapped_reads = sum([bam.mapped for bam in bams])
km = genome.bp/1000.
if intervals is not None:
total_length = 0
total_mapped_reads = 0
with open(intervals, 'r') as bed:
for line in bed:
chrom, start, end = line.strip().split()[:3]
start = int(start)
end = int(end)
total_length += end - start
for bam in bams:
total_mapped_reads += sum([not read.is_unmapped for read in bam.fetch(chrom, start, end)])
km = total_length/1000.
for bam in bams: bam.close()
return total_mapped_reads/km
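# Worked example: 300 million mapped reads on a 3 Gbp genome give
# 3e8 / (3e9 / 1000.) = 100 expected reads per kilobase; run_chunk skips
# clusters whose local read density exceeds max_fold_rpkm * this value.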
def main(args):
''' housekeeping '''
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.debug("commandline: %s" % ' '.join(sys.argv))
checkref(args.bwaref)
if not args.no_shared_mem:
logger.debug("loading bwa index %s into shared memory ..." % args.bwaref)
p = subprocess.Popen(['bwa', 'shm', args.bwaref], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in p.stdout: pass # wait for bwa to load
logger.debug("loaded.")
''' Chunk genome or use input BED '''
procs = int(args.processes)
chunk_count = int(args.chunks)
    if chunk_count < procs: chunk_count = procs
pool = mp.Pool(processes=procs)
genome = Genome(args.bwaref + '.fai')
chunks = []
exp_rpkm = 0
if args.interval_bed is None or args.wg_rpkm:
if args.interval_bed is None:
chunks = genome.chunk(chunk_count, sorted=True, pad=5000)
if not args.no_rpkm:
if args.rpkm_bam:
exp_rpkm = expected_rpkm(args.rpkm_bam.split(','), genome)
else:
exp_rpkm = expected_rpkm(args.bam.split(','), genome)
else:
if args.rpkm_bam:
exp_rpkm = expected_rpkm(args.rpkm_bam.split(','), genome, intervals=args.interval_bed)
else:
exp_rpkm = expected_rpkm(args.bam.split(','), genome, intervals=args.interval_bed)
if args.interval_bed is not None:
with open(args.interval_bed, 'r') as bed:
chunks = [(line.strip().split()[0], int(line.strip().split()[1]), int(line.strip().split()[2])) for line in bed]
logger.debug("chunk count: %d" % len(chunks))
if not args.no_rpkm and exp_rpkm < 10:
sys.stderr.write("expected RPKM is less than 10, ignoring high RPKM cutoffs...\n")
exp_rpkm = 0
reslist = []
for chunk in chunks:
# run_chunk(args, exp_rpkm, chunk[0], chunk[1], chunk[2]) # uncomment for mp debug
res = pool.apply_async(run_chunk, [args, exp_rpkm, chunk[0], chunk[1], chunk[2]])
reslist.append(res)
insertions = []
for res in reslist:
insertions += res.get()
insertions = resolve_duplicates(insertions)
text_summary(insertions, outfile=args.detail_out)
    pickoutfn = re.sub(r'\.bam$', '.tebreak.pickle', os.path.basename(args.bam))
if args.pickle is not None: pickoutfn = args.pickle
with open(pickoutfn, 'w') as pickout:
pickle.dump(insertions, pickout)
#if not args.no_shared_mem:
# sys.stderr.write("unloading bwa index %s from shared memory ...\n" % args.bwaref)
# p = subprocess.Popen(['bwa', 'shm', '-d'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# for line in p.stdout: pass # wait for bwa to unload
logger.debug('Pickled to %s' % pickoutfn)
if __name__ == '__main__':
# set up logger
FORMAT = '%(asctime)s %(message)s'
logging.basicConfig(format=FORMAT)
parser = argparse.ArgumentParser(description='Find inserted sequences vs. reference')
parser.add_argument('-b', '--bam', required=True, help='target BAM(s): can be comma-delimited list')
parser.add_argument('-r', '--bwaref', required=True, help='bwa/samtools indexed reference genome')
parser.add_argument('-p', '--processes', default=1, help='split work across multiple processes')
parser.add_argument('-c', '--chunks', default=1, help='split genome into chunks (default = # processes), helps control memory usage')
parser.add_argument('-i', '--interval_bed', default=None, help='BED file with intervals to scan')
parser.add_argument('-D', '--maxD', default=0.8, help='maximum value of KS D statistic for split qualities (default = 0.8)')
parser.add_argument('--min_minclip', default=3, help='min. shortest clipped bases per cluster (default = 3)')
parser.add_argument('--min_maxclip', default=10, help='min. longest clipped bases per cluster (default = 10)')
parser.add_argument('--min_sr_per_break', default=1, help='minimum split reads per breakend (default = 1)')
parser.add_argument('--min_consensus_score', default=0.95, help='quality of consensus alignment (default = 0.95)')
parser.add_argument('-m', '--mask', default=None, help='BED file of masked regions')
parser.add_argument('--rpkm_bam', default=None, help='use alternate BAM(s) for RPKM calculation: use original BAMs if using reduced BAM(s) for -b/--bam')
parser.add_argument('--max_fold_rpkm', default=10, help='ignore insertions supported by rpkm*max_fold_rpkm reads (default = 10)')
parser.add_argument('--max_ins_reads', default=1000, help='maximum number of reads per insertion call (default = 1000)')
parser.add_argument('--min_split_reads', default=4, help='minimum total split reads per insertion call (default = 4)')
#parser.add_argument('--min_discordant_reads', default=4, help='minimum discordant read count (default = 4)')
parser.add_argument('--min_prox_mapq', default=10, help='minimum map quality for proximal subread (default = 10)')
parser.add_argument('--max_N_consensus', default=4, help='exclude breakend seqs with > this number of N bases (default = 4)')
parser.add_argument('--exclude_bam', default=None, help='may be comma delimited')
parser.add_argument('--exclude_readgroup', default=None, help='may be comma delimited')
parser.add_argument('--max_bam_count', default=0, help='maximum number of bams supporting per insertion')
parser.add_argument('--insertion_library', default=None, help='for pre-selecting insertion types')
parser.add_argument('--map_tabix', default=None, help='tabix-indexed BED of mappability scores')
parser.add_argument('--min_mappability', default=0.5, help='minimum mappability (default = 0.5; only matters with --map_tabix)')
parser.add_argument('--tmpdir', default='/tmp', help='temporary directory (default = /tmp)')
parser.add_argument('--pickle', default=None, help='pickle output name')
parser.add_argument('--detail_out', default='tebreak.out', help='file to write detailed output')
parser.add_argument('--wg_rpkm', default=False, action='store_true', help='force calculate rpkm over whole genome')
parser.add_argument('--no_rpkm', default=False, action='store_true', help='do not filter sites by rpkm')
parser.add_argument('--no_shared_mem', default=False, action='store_true')
args = parser.parse_args()
main(args)
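# Example invocation (hypothetical paths), assuming bwa, samtools, lastal and
# minia are on PATH and hg19.fa has bwa and faidx indexes:
#   python tebreak.py -b sample.bam -r hg19.fa -p 8 -c 64 --detail_out sample.tebreak.out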
# ---- dataset record ----
# repo_name: ValentinaPeona/tebreak
# path: tebreak/tebreak.py
# language: Python
# license: mit
# size: 65229
# keyword: ["BWA", "pysam"]
# text_hash: a6c13c8390811d9959ed894184ae7a60c9af3fac7a2ee29583cf9264d95e95a9
# ------------------------
#
# Copyright (C) 2013-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import numpy as np
import espressomd
required_features = ["ELECTROSTATICS", "LENNARD_JONES"]
espressomd.assert_features(required_features)
from espressomd import thermostat
from espressomd import electrostatics
from espressomd import electrostatic_extensions
print("""
=======================================================
= p3m.py =
=======================================================
Program Information:""")
print(espressomd.features())
# System parameters
#############################################################
# 10 000 Particles
box_l = 10
density = 0.3
# Interaction parameters (repulsive Lennard Jones)
#############################################################
lj_eps = 10.0
lj_sig = 1.0
lj_cut = 1.12246
lj_cap = 20
# Integration parameters
#############################################################
system = espressomd.System(box_l=[box_l] * 3)
system.set_random_state_PRNG()
#system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
np.random.seed(seed=system.seed)
system.time_step = 0.01
system.cell_system.skin = 0.4
thermostat.Thermostat().set_langevin(1.0, 1.0)
# warmup integration (with capped LJ potential)
warm_steps = 100
warm_n_times = 30
# do the warmup until the particles have at least the distance min_dist
min_dist = 0.7
# integration
int_steps = 1000
int_n_times = 10
#############################################################
# Setup System #
#############################################################
# Interaction setup
#############################################################
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig,
cutoff=lj_cut, shift="auto")
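# shift="auto" offsets the potential so it is zero at the cutoff; with
# lj_cut = 1.12246 ~ 2^(1/6) * sigma the potential is truncated at its minimum,
# giving a purely repulsive (WCA-like) interaction.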
system.force_cap = lj_cap
print("LJ-parameters:")
print(system.non_bonded_inter[0, 0].lennard_jones.get_params())
# Particle setup
#############################################################
volume = box_l * box_l * box_l
n_part = int(volume * density)
for i in range(n_part):
system.part.add(id=i, pos=np.random.random(3) * system.box_l)
system.analysis.dist_to(0)
print("Simulate {} particles in a cubic simulation box {} at density {}."
.format(n_part, box_l, density).strip())
print("Interactions:\n")
act_min_dist = system.analysis.min_dist()
print("Start with minimal distance {}".format(act_min_dist))
system.cell_system.max_num_cells = 2744
# Assign charge to particles
for i in range(n_part // 2 - 1):
system.part[2 * i].q = -1.0
system.part[2 * i + 1].q = 1.0
# P3M setup after charge assigned
#############################################################
print("\nSCRIPT--->Create p3m\n")
#p3m = electrostatics.P3M_GPU(prefactor=2.0, accuracy=1e-2)
p3m = electrostatics.P3M(prefactor=1.0, accuracy=1e-2)
print("\nSCRIPT--->Add actor\n")
system.actors.add(p3m)
print("\nSCRIPT--->P3M parameter:\n")
p3m_params = p3m.get_params()
for key in list(p3m_params.keys()):
print("{} = {}".format(key, p3m_params[key]))
print("\nSCRIPT--->Explicit tune call\n")
p3m.tune(accuracy=1e-3)
print("\nSCRIPT--->P3M parameter:\n")
p3m_params = p3m.get_params()
for key in list(p3m_params.keys()):
print("{} = {}".format(key, p3m_params[key]))
# elc=electrostatic_extensions.ELC(maxPWerror=1.0,gap_size=1.0)
# system.actors.add(elc)
print(system.actors)
#############################################################
# Warmup Integration #
#############################################################
# open Observable file
obs_file = open("pylj_liquid.obs", "w")
obs_file.write("# Time\tE_tot\tE_kin\tE_pot\n")
print("""
Start warmup integration:
At maximum {} times {} steps
Stop if minimal distance is larger than {}
""".strip().format(warm_n_times, warm_steps, min_dist))
# set LJ cap
lj_cap = 20
system.force_cap = lj_cap
print(system.non_bonded_inter[0, 0].lennard_jones)
# Warmup Integration Loop
i = 0
while i < warm_n_times and act_min_dist < min_dist:
system.integrator.run(warm_steps)
# Warmup criterion
act_min_dist = system.analysis.min_dist()
i += 1
print(
"i =",
i,
"system.analysis.min_dist() = ",
system.analysis.min_dist(),
"lj_cap = ",
lj_cap)
# Increase LJ cap
lj_cap += 20
system.force_cap = lj_cap
# Just to see what else we may get from the c code
import pprint
pprint.pprint(system.cell_system.get_state(), width=1)
# pprint.pprint(system.part.__getstate__(), width=1)
pprint.pprint(system.__getstate__())
# write parameter file
set_file = open("pylj_liquid.set", "w")
set_file.write("box_l %s\ntime_step %s\nskin %s\n" %
(box_l, system.time_step, system.cell_system.skin))
#############################################################
# Integration #
#############################################################
print("\nStart integration: run %d times %d steps" % (int_n_times, int_steps))
# remove force capping
lj_cap = 0
system.force_cap = lj_cap
print(system.non_bonded_inter[0, 0].lennard_jones)
# Print initial energies
energies = system.analysis.energy()
print(energies)
for i in range(0, int_n_times):
print("run %d at time=%f " % (i, system.time))
system.integrator.run(int_steps)
energies = system.analysis.energy()
print(energies)
obs_file.write('{ time %s } %s\n' % (system.time, energies))
# write end configuration
end_file = open("pylj_liquid.end", "w")
end_file.write("{ time %f } \n { box_l %f }\n" % (system.time, box_l))
end_file.write("{ particles {id pos type} }")
for i in range(n_part):
end_file.write("%s\n" % system.part[i].pos)
obs_file.close()
set_file.close()
end_file.close()
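# Illustrative post-processing sketch (an editorial addition, not part of the
# original sample): re-read the observable file written above and report the
# sampled time range, assuming only the "{ time <t> } ..." line format used
# in the integration loop.
import re
obs_times = []
with open("pylj_liquid.obs") as obs_in:
    for line in obs_in:
        match = re.match(r"\{ time ([-+0-9.eE]+) \}", line)
        if match:
            obs_times.append(float(match.group(1)))
if obs_times:
    print("Recorded {} observable samples from t={} to t={}".format(
        len(obs_times), min(obs_times), max(obs_times)))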
# terminate program
print("\nFinished.")
|
hmenke/espresso
|
samples/p3m.py
|
Python
|
gpl-3.0
| 6,556
|
[
"ESPResSo"
] |
2853ac103adaf74fb8b3fef7283aeca193fdb87b9312e7c9bddb67cd477acc88
|
#!/usr/bin/env python
"""Variational Bayes model to fuse softmax likelihoods with Gaussian priors.

Provides VB and importance-sampled (VBIS) measurement updates for Gaussian
and Gaussian-mixture priors combined with (multimodal) softmax likelihoods.
"""
from __future__ import division
__author__ = "Nick Sweet"
__copyright__ = "Copyright 2015, Cohrint"
__credits__ = ["Nick Sweet", "Nisar Ahmed"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Nick Sweet"
__email__ = "nick.sweet@colorado.edu"
__status__ = "Development"
import logging
import itertools
import numpy as np
from numpy.linalg import inv, det
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from descartes.patch import PolygonPatch
from cops_and_robots.fusion.softmax import (speed_model,
intrinsic_space_model,
binary_speed_model,
range_model,
binary_range_model,
camera_model_2D)
from cops_and_robots.fusion.gaussian_mixture import (GaussianMixture,
fleming_prior,
uniform_prior
)
from cops_and_robots.map_tools.map_elements import MapObject
from numba import jit
class VariationalBayes(object):
"""short description of VariationalBayes
long description of VariationalBayes
Parameters
----------
param : param_type, optional
param_description
Attributes
----------
attr : attr_type
attr_description
Methods
----------
attr : attr_type
attr_description
"""
def __init__(self,
num_EM_convergence_loops=15,
EM_convergence_tolerance=10 ** -3,
max_EM_steps=250,
num_importance_samples=500,
num_mixand_samples=500,
weight_threshold=None,
mix_sm_corr_thresh=0.98,
max_num_mixands=50):
self.num_EM_convergence_loops = num_EM_convergence_loops
self.EM_convergence_tolerance = EM_convergence_tolerance
self.max_EM_steps = max_EM_steps
self.num_importance_samples = num_importance_samples
self.num_mixand_samples = num_mixand_samples
if weight_threshold is None:
weight_threshold = np.finfo(float).eps # Machine precision
self.weight_threshold = weight_threshold
self.mix_sm_corr_thresh = mix_sm_corr_thresh
self.max_num_mixands = max_num_mixands
@jit(cache=True)
def vb_update(self, measurement, likelihood, prior,
init_mean=0, init_var=1, init_alpha=0.5, init_xi=1):
"""Variational bayes update for Gaussian and Softmax.
"""
# Likelihood values
if hasattr(likelihood, 'subclasses'):
m = likelihood.num_subclasses
j = likelihood.subclasses[measurement].id
init_xi = np.ones(likelihood.num_subclasses)
else:
m = likelihood.num_classes
j = likelihood.classes[measurement].id
init_xi = np.ones(likelihood.num_classes)
w = likelihood.weights
b = likelihood.biases
xis, alpha, mu_hat, var_hat, prior_mean, prior_var = \
self._check_inputs(likelihood, init_mean, init_var, init_alpha, init_xi, prior)
dummy_weights = np.zeros((w.shape[0], prior.means[0].shape[0]-w.shape[1]))
w = np.hstack((w, dummy_weights))
converged = False
EM_step = 0
while not converged and EM_step < self.max_EM_steps:
################################################################
# STEP 1 - EXPECTATION
################################################################
# PART A #######################################################
# find g_j
sum1 = 0
for c in range(m):
if c != j:
sum1 += b[c]
            sum2 = 0
            for c in range(m):
                sum2 += xis[c] / 2 \
                    + self._lambda(xis[c]) * (xis[c] ** 2 - (b[c] - alpha) ** 2) \
                    - np.log(1 + np.exp(xis[c]))
g_j = 0.5 * (b[j] - sum1) + alpha * (m / 2 - 1) + sum2
# find h_j
sum1 = 0
for c in range(m):
if c != j:
sum1 += w[c]
sum2 = 0
for c in range(m):
sum2 += self._lambda(xis[c]) * (alpha - b[c]) * w[c]
h_j = 0.5 * (w[j] - sum1) + 2 * sum2
# find K_j
sum1 = 0
for c in range(m):
sum1 += self._lambda(xis[c]) * np.outer(w[c], (w[c]))
K_j = 2 * sum1
K_p = inv(prior_var)
            g_p = -0.5 * (np.log(np.linalg.det(2 * np.pi * prior_var))
                          + prior_mean.T .dot (K_p) .dot (prior_mean))
h_p = K_p .dot (prior_mean)
g_l = g_p + g_j
h_l = h_p + h_j
K_l = K_p + K_j
mu_hat = inv(K_l) .dot (h_l)
var_hat = inv(K_l)
# PART B #######################################################
y_cs = np.zeros(m)
y_cs_squared = np.zeros(m)
for c in range(m):
y_cs[c] = w[c].T .dot (mu_hat) + b[c]
y_cs_squared[c] = w[c].T .dot \
(var_hat + np.outer(mu_hat, mu_hat.T)) .dot (w[c]) \
+ 2 * w[c].T .dot (mu_hat) * b[c] + b[c] ** 2
################################################################
# STEP 2 - MAXIMIZATION
################################################################
for i in range(self.num_EM_convergence_loops): # n_{lc}
# PART A ######################################################
# Find xis
for c in range(m):
xis[c] = np.sqrt(y_cs_squared[c] + alpha ** 2 - 2 * alpha
* y_cs[c])
# PART B ######################################################
# Find alpha
num_sum = 0
den_sum = 0
for c in range(m):
num_sum += self._lambda(xis[c]) * y_cs[c]
den_sum += self._lambda(xis[c])
alpha = ((m - 2) / 4 + num_sum) / den_sum
################################################################
# STEP 3 - CONVERGENCE CHECK
################################################################
if EM_step == 0:
prev_log_c_hat = -1000 # Arbitrary value
KLD = 0.5 * (np.log(det(prior_var) / det(var_hat)) +
np.trace(inv(prior_var) .dot (var_hat)) +
(prior_mean - mu_hat).T .dot (inv(prior_var)) .dot
(prior_mean - mu_hat))
sum1 = 0
for c in range(m):
sum1 += 0.5 * (alpha + xis[c] - y_cs[c]) \
- self._lambda(xis[c]) * (y_cs_squared[c] - 2 * alpha
* y_cs[c] + alpha ** 2 - xis[c] ** 2) \
- np.log(1 + np.exp(xis[c]))
# <>TODO: don't forget Mun - unobserved parents!
# <>CHECK - WHY DO WE ADD +1 HERE??
log_c_hat = y_cs[j] - alpha + sum1 - KLD + 1
if np.abs(log_c_hat - prev_log_c_hat) < self.EM_convergence_tolerance:
logging.debug('Convergence reached at step {} with log_c_hat {}'
.format(EM_step, log_c_hat))
break
prev_log_c_hat = log_c_hat
EM_step += 1
# Resize parameters
if mu_hat.size == 1:
mu_post = mu_hat[0]
else:
mu_post = mu_hat
if var_hat.size == 1:
var_post = var_hat[0][0]
else:
var_post = var_hat
logging.debug('VB update found mean of {} and variance of {}.'
.format(mu_post, var_post))
return mu_post, var_post, log_c_hat
    def lwis_update(self, measurement, likelihood, prior, num_samples=None):
        """Likelihood-weighted importance sampling (LWIS) update.

        Draws samples from the prior (used as the importance distribution)
        and reweights them by the measurement likelihood.

        Possible follow-on clustering of the resulting mixture:
        pairwise greedy merging - compare means, weights & variances
        (Salmond's method, or Runnalls' method, which is better).
        """
        if num_samples is None:
            num_samples = self.num_importance_samples
        prior_mean = np.asarray(prior.means[0])
        prior_var = np.asarray(prior.covariances[0])
        # Importance distribution
        q = GaussianMixture(1, prior_mean, prior_var)
        # Importance sampling correction
        w = np.zeros(num_samples)  # Importance weights
        x = q.rvs(size=num_samples)  # Sampled points
        x = np.asarray(x)
        if hasattr(likelihood, 'subclasses'):
            measurement_class = likelihood.subclasses[measurement]
        else:
            measurement_class = likelihood.classes[measurement]
        for i in range(num_samples):
            w[i] = prior.pdf(x[i]) \
                * measurement_class.probability(state=x[i])\
                / q.pdf(x[i])
        w /= np.sum(w)  # Normalize weights
        # Moment-matched posterior estimates from the weighted samples
        mu_hat = np.zeros_like(prior_mean, dtype=float)
        for i in range(num_samples):
            x_i = np.asarray(x[i])
            mu_hat = mu_hat + w[i] * x_i
        var_hat = np.zeros_like(prior_var, dtype=float)
        for i in range(num_samples):
            x_i = np.asarray(x[i])
            var_hat = var_hat + w[i] * np.outer(x_i, x_i)
        var_hat -= np.outer(mu_hat, mu_hat)
        # LWIS yields no evidence estimate, so flag it as unavailable
        log_c_hat = np.nan
        if mu_hat.size == 1 and mu_hat.ndim > 0:
            mu_lwis = mu_hat[0]
        else:
            mu_lwis = mu_hat
        if var_hat.size == 1:
            var_lwis = var_hat[0][0]
        else:
            var_lwis = var_hat
        logging.debug('LWIS update found mean of {} and variance of {}.'
                      .format(mu_lwis, var_lwis))
        return mu_lwis, var_lwis, log_c_hat
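    def _greedy_merge_mixands_sketch(self, weights, means, covariances,
                                     max_num_mixands=None):
        """Illustrative sketch of the merging mentioned in ``lwis_update``.

        Editorial addition, not part of the original module: a minimal
        pairwise greedy mixture reduction in the spirit of Runnalls'
        method. The dissimilarity used is Runnalls' upper bound on the KL
        discrimination incurred by merging two mixands; the method name
        and the exhaustive pair search are assumptions for illustration.
        """
        weights = np.asarray(weights, dtype=float)
        means = np.atleast_2d(np.asarray(means, dtype=float))
        covariances = np.asarray(covariances, dtype=float)
        if max_num_mixands is None:
            max_num_mixands = self.max_num_mixands

        def merge_two(w_i, mu_i, P_i, w_j, mu_j, P_j):
            # Moment-preserving merge of two weighted Gaussians
            w = w_i + w_j
            mu = (w_i * mu_i + w_j * mu_j) / w
            d_i, d_j = mu_i - mu, mu_j - mu
            P = (w_i * (P_i + np.outer(d_i, d_i))
                 + w_j * (P_j + np.outer(d_j, d_j))) / w
            return w, mu, P

        def dissimilarity(i, j):
            # Runnalls' bound: 0.5 * [w log|P| - w_i log|P_i| - w_j log|P_j|]
            w, _, P = merge_two(weights[i], means[i], covariances[i],
                                weights[j], means[j], covariances[j])
            return 0.5 * (w * np.log(det(P))
                          - weights[i] * np.log(det(covariances[i]))
                          - weights[j] * np.log(det(covariances[j])))

        while weights.size > max_num_mixands:
            # Exhaustively find and merge the cheapest pair
            pairs = itertools.combinations(range(weights.size), 2)
            i, j = min(pairs, key=lambda ij: dissimilarity(*ij))
            w, mu, P = merge_two(weights[i], means[i], covariances[i],
                                 weights[j], means[j], covariances[j])
            keep = [k for k in range(weights.size) if k not in (i, j)]
            weights = np.append(weights[keep], w)
            means = np.vstack((means[keep], mu[None, :]))
            covariances = np.concatenate((covariances[keep], P[None, :, :]))
        return weights, means, covariances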
def vbis_update(self, measurement, likelihood, prior,
exact_likelihoods=None, exact_measurements=None,
init_mean=0, init_var=1, init_alpha=0.5, init_xi=1,
num_samples=None, use_LWIS=False):
"""VB update with importance sampling for Gaussian and Softmax.
"""
if num_samples is None:
num_samples = self.num_importance_samples
logging.debug('use_LWIS: {}'.format(use_LWIS))
if use_LWIS:
q_mu = np.asarray(prior.means[0])
log_c_hat = np.nan
else:
# Use VB update
q_mu, var_VB, log_c_hat = self.vb_update(measurement, likelihood,
prior,
init_mean, init_var,
init_alpha, init_xi)
q_var = np.asarray(prior.covariances[0])
# Importance distribution
q = GaussianMixture(1, q_mu, q_var)
logging.debug('q mean = {}'.format(q.means))
# Importance sampling correction
w = np.zeros(num_samples) # Importance weights
x = q.rvs(size=num_samples) # Sampled points
x = np.asarray(x)
if hasattr(likelihood, 'subclasses'):
measurement_class = likelihood.subclasses[measurement]
else:
measurement_class = likelihood.classes[measurement]
# Sample from all the exact classes in all the sm models from the sm product
# sample each parent of the joint measurement from the distribution generated by the vb update
# take the product of the prior pdf at x and all the parent likelihoods at sample points x
if exact_likelihoods is None:
likelihood_at_x = measurement_class.probability(state=x)
else:
for i, exact_likelihood in enumerate(exact_likelihoods):
if hasattr(exact_likelihood, 'subclasses'):
exact_measurement_class = exact_likelihood.subclasses[exact_measurements[i]]
else:
exact_measurement_class = exact_likelihood.classes[exact_measurements[i]]
if i == 0:
likelihood_at_x = np.ones_like(exact_measurement_class.probability(state=x))
else:
likelihood_at_x *= exact_measurement_class.probability(state=x)
# Compute parameters using samples
w = prior.pdf(x) * likelihood_at_x / q.pdf(x)
w /= np.sum(w) # Normalize weights
mu_hat = np.sum(x.T * w, axis=-1)
# <>TODO: optimize this
var_hat = np.zeros_like(np.asarray(q_var))
for i in range(num_samples):
x_i = np.asarray(x[i])
var_hat = var_hat + w[i] * np.outer(x_i, x_i)
var_hat -= np.outer(mu_hat, mu_hat)
# Ensure properly formatted output
if mu_hat.size == 1 and mu_hat.ndim > 0:
mu_post_vbis = mu_hat[0]
else:
mu_post_vbis = mu_hat
if var_hat.size == 1:
var_post_vbis = var_hat[0][0]
else:
var_post_vbis = var_hat
logging.debug('VBIS update found mean of {} and variance of {}.'
.format(mu_post_vbis, var_post_vbis))
return mu_post_vbis, var_post_vbis, log_c_hat
def update(self, measurement, likelihood, prior, use_LWIS=False,
poly=None, num_std=1, get_raw_beta=False,
exact_likelihoods=None, exact_measurements=None,):
"""VB update using Gaussian mixtures and multimodal softmax.
This uses Variational Bayes with Importance Sampling (VBIS) for
each mixand-softmax pair available.
"""
# If we have a polygon, update only the mixands intersecting with it
if poly is None:
update_intersections_only = False
else:
update_intersections_only = True
h = 0
if likelihood.classes[measurement].has_subclasses:
relevant_subclasses = likelihood.classes[measurement].subclasses
else:
relevant_subclasses = {measurement: likelihood.classes[measurement]}
num_relevant_subclasses = len(relevant_subclasses)
# Use intersecting priors only
if update_intersections_only and hasattr(prior, 'std_ellipses'):
other_priors = prior.copy()
weights = []
means = []
covariances = []
mixand_ids = []
ellipses = prior.std_ellipses(num_std)
any_intersection = False
for i, ellipse in enumerate(ellipses):
try:
has_intersection = poly.intersects(ellipse)
except ValueError:
logging.warn('Null geometry error! Defaulting to true.')
has_intersection = True
if has_intersection:
# Get parameters for intersecting priors
mixand_ids.append(i)
weights.append(prior.weights[i])
means.append(prior.means[i])
covariances.append(prior.covariances[i])
any_intersection = True
if not any_intersection:
logging.debug('No intersection with any ellipse.')
mu_hat = other_priors.means
var_hat = other_priors.covariances
beta_hat = other_priors.weights
return mu_hat, var_hat, beta_hat
# Remove these from the other priors
other_priors.weights = \
np.delete(other_priors.weights, mixand_ids, axis=0)
other_priors.means = \
np.delete(other_priors.means, mixand_ids, axis=0)
other_priors.covariances = \
np.delete(other_priors.covariances, mixand_ids, axis=0)
# Retain total weight of intersection weights for renormalization
max_intersection_weight = sum(weights)
# Create new prior
prior = GaussianMixture(weights, means, covariances)
logging.debug('Using only mixands {} for VBIS fusion. Total weight {}'
.format(mixand_ids, max_intersection_weight))
# Parameters for all new mixands
K = num_relevant_subclasses * prior.weights.size
mu_hat = np.zeros((K, prior.means.shape[1]))
var_hat = np.zeros((K, prior.covariances.shape[1],
prior.covariances.shape[2]))
log_beta_hat = np.zeros(K) # Weight estimates
for u, mixand_weight in enumerate(prior.weights):
mix_sm_corr = 0
# Check to see if the mixand is completely contained within
# the softmax class (i.e. doesn't need an update)
mixand = GaussianMixture(1, prior.means[u], prior.covariances[u])
logging.debug('prior.means[u]: {}'.format(prior.means[u]))
logging.debug('prior.covariances[u]: {}'.format(prior.covariances[u]))
logging.debug('mixand.means: {}'.format(mixand.means))
logging.debug('mixand.covariances: {}'.format(mixand.covariances))
logging.debug('type means: {}'.format(type(prior.means[u])))
mixand_samples = mixand.rvs(self.num_mixand_samples)
p_hat_ru_samples = likelihood.classes[measurement].probability(state=mixand_samples[:,0:2])
# logging.debug('p_hat_ru_samples: {}'.format(p_hat_ru_samples))
mix_sm_corr = np.sum(p_hat_ru_samples) / self.num_mixand_samples
logging.debug('gm and softmax correlation: {}, threshold: {}'
.format(mix_sm_corr, self.mix_sm_corr_thresh))
if mix_sm_corr > self.mix_sm_corr_thresh:
logging.debug('Mixand {}\'s correspondence with {} was {},'
'above the threshold of {}, so VBIS was skipped.'
.format(u, measurement, mix_sm_corr, self.mix_sm_corr_thresh))
# Append the prior's parameters to the mixand parameter lists
mu_hat[h, :] = prior.means[u]
var_hat[h, :] = prior.covariances[u]
log_beta_hat[h] = np.log(mixand_weight)
                h += 1
continue
# Otherwise complete the full VBIS update
ordered_subclasses = iter(sorted(relevant_subclasses.iteritems()))
for label, subclass in ordered_subclasses:
# print label
# Compute \hat{P}_s(r|u)
mixand_samples = mixand.rvs(self.num_mixand_samples)
p_hat_ru_samples = subclass.probability(state=mixand_samples)
p_hat_ru_sampled = np.sum(p_hat_ru_samples) / self.num_mixand_samples
logging.debug('Starting vbis_update')
logging.debug('mixand.means: {}'.format(mixand.means))
logging.debug('mixand.covariances: {}'.format(mixand.covariances))
mu_vbis, var_vbis, log_c_hat = \
self.vbis_update(label, subclass.softmax_collection,
mixand, use_LWIS=use_LWIS,
exact_likelihoods=exact_likelihoods,
exact_measurements=exact_measurements,
)
logging.debug('Finished vbis_update')
# Compute log odds of r given u
if np.isnan(log_c_hat): # from LWIS update
log_p_hat_ru = np.log(p_hat_ru_sampled)
else:
log_p_hat_ru = np.max((log_c_hat, np.log(p_hat_ru_sampled)))
# Find log of P(u,r|D_k) \approxequal \hat{B}_{ur}
log_beta_vbis = np.log(mixand_weight) + log_p_hat_ru
# Symmetrize var_vbis
var_vbis = 0.5 * (var_vbis.T + var_vbis)
# Update estimate values
log_beta_hat[h] = log_beta_vbis
mu_hat[h,:] = mu_vbis
var_hat[h,:] = var_vbis
h += 1
# Renormalize and truncate (based on weight threshold)
raw_beta_hats = np.exp(log_beta_hat)
log_beta_hat = log_beta_hat - np.max(log_beta_hat)
unnormalized_beta_hats = np.exp(log_beta_hat)
beta_hat = np.exp(log_beta_hat) / np.sum(np.exp(log_beta_hat))
# Reattach untouched prior values
if update_intersections_only:
beta_hat = unnormalized_beta_hats * max_intersection_weight
beta_hat = np.hstack((other_priors.weights, beta_hat))
mu_hat = np.vstack((other_priors.means, mu_hat))
var_hat = np.concatenate((other_priors.covariances, var_hat))
# Shrink mu, var and beta if necessary
h += other_priors.weights.size
beta_hat = beta_hat[:h]
mu_hat = mu_hat[:h]
var_hat = var_hat[:h]
beta_hat /= beta_hat.sum()
else:
# Shrink mu, var and beta if necessary
beta_hat = beta_hat[:h]
mu_hat = mu_hat[:h]
var_hat = var_hat[:h]
# Threshold based on weights
mu_hat = mu_hat[beta_hat > self.weight_threshold, :]
var_hat = var_hat[beta_hat > self.weight_threshold, :]
beta_hat = beta_hat[beta_hat > self.weight_threshold]
        # Check that each covariance is non-degenerate (positive determinant)
        for i, var in enumerate(var_hat):
            try:
                assert np.all(np.linalg.det(var) > 0)
            except AssertionError:
logging.warn('Following variance is not positive '
'semidefinite: \n{}'.format(var))
var_hat[i] = np.eye(var.shape[0]) * 10 ** -3
# Renormalize beta_hat
beta_hat /= beta_hat.sum()
if get_raw_beta:
beta_hat = raw_beta_hats
return mu_hat, var_hat, beta_hat
    def _lambda(self, xi_c):
        # Jaakkola-Jordan bound helper: lambda(xi) = (sigmoid(xi) - 1/2) / (2 xi)
        return 1 / (2 * xi_c) * ( (1 / (1 + np.exp(-xi_c))) - 0.5)
def _check_inputs(self, likelihood, init_mean, init_var, init_alpha, init_xi, prior):
# Make sure inputs are numpy arrays
init_mean = np.asarray(init_mean)
init_var = np.asarray(init_var)
init_alpha = np.asarray(init_alpha)
init_xi = np.asarray(init_xi)
if init_xi.ndim != 1:
try:
m = likelihood.num_subclasses
assert init_xi.size == m
except AssertionError:
logging.exception('Initial xi was not the right size.')
raise
init_xi = np.reshape(init_xi, (1, -1))
logging.debug("Initial xi is not the right shape. Reshaping.")
# Preparation
xis = init_xi
alpha = init_alpha
mu_hat = init_mean
var_hat = init_var
# <>EXTEND
prior_mean = prior.means[0]
prior_var = prior.covariances[0]
return xis, alpha, mu_hat, var_hat, prior_mean, prior_var
def comparison_1d():
# Define prior
prior_mean, prior_var = 0.3, 0.01
min_x, max_x = -5, 5
res = 10000
prior = GaussianMixture(1, prior_mean, prior_var)
x_space = np.linspace(min_x, max_x, res)
# Define sensor likelihood
sm = speed_model()
measurement = 'Slow'
measurement_i = sm.class_labels.index(measurement)
# Do a VB update
init_mean, init_var = 0, 1
init_alpha, init_xi = 0.5, np.ones(4)
vb = VariationalBayes()
vb_mean, vb_var, _ = vb.vb_update(measurement, sm, prior, init_mean,
init_var, init_alpha, init_xi)
vb_posterior = GaussianMixture(1, vb_mean, vb_var)
nisar_vb_mean = 0.131005297841171
nisar_vb_var = 6.43335516254277e-05
diff_vb_mean = vb_mean - nisar_vb_mean
diff_vb_var = vb_var - nisar_vb_var
logging.info('Nisar\'s VB update had mean difference {} and var difference {}\n'
.format(diff_vb_mean, diff_vb_var))
# Do a VBIS update
    vbis_mean, vbis_var, _ = vb.vbis_update(measurement, sm, prior,
                                            init_mean=init_mean,
                                            init_var=init_var,
                                            init_alpha=init_alpha,
                                            init_xi=init_xi)
vbis_posterior = GaussianMixture(1, vbis_mean, vbis_var)
nisar_vbis_mean = 0.154223416817080
nisar_vbis_var = 0.00346064073274943
diff_vbis_mean = vbis_mean - nisar_vbis_mean
diff_vbis_var = vbis_var - nisar_vbis_var
logging.info('Nisar\'s VBIS update had mean difference {} and var difference {}\n'
.format(diff_vbis_mean, diff_vbis_var))
# Plot results
likelihood_label = 'Likelihood of \'{}\''.format(measurement)
fig = plt.figure()
ax = fig.add_subplot(111)
sm.classes[measurement].plot(ax=ax, fill_between=False, label=likelihood_label, ls='--')
ax.plot(x_space, prior.pdf(x_space), lw=1, label='prior pdf', c='grey', ls='--')
ax.plot(x_space, vb_posterior.pdf(x_space), lw=2, label='VB posterior', c='r')
ax.fill_between(x_space, 0, vb_posterior.pdf(x_space), alpha=0.2, facecolor='r')
ax.plot(x_space, vbis_posterior.pdf(x_space), lw=2, label='VBIS Posterior', c='g')
ax.fill_between(x_space, 0, vbis_posterior.pdf(x_space), alpha=0.2, facecolor='g')
ax.set_title('VBIS Update')
ax.legend()
ax.set_xlim([0, 0.4])
ax.set_ylim([0, 7])
plt.show()
def comparison_2d():
# Define prior
prior_mean = np.array([2.3, 1.2])
prior_var = np.array([[2, 0.6], [0.6, 2]])
prior = GaussianMixture(1, prior_mean, prior_var)
# Define sensor likelihood
sm = intrinsic_space_model()
measurement = 'Front'
measurement_i = sm.classes[measurement].id
# Do a VB update
init_mean = np.zeros((1,2))
init_var = np.eye(2)
init_alpha = 0.5
init_xi = np.ones(5)
vb = VariationalBayes()
vb_mean, vb_var, _ = vb.vb_update(measurement, sm, prior, init_mean,
init_var, init_alpha, init_xi)
nisar_vb_mean = np.array([1.795546121012238, 2.512627005425541])
nisar_vb_var = np.array([[0.755723395661314, 0.091742424424428],
[0.091742424424428, 0.747611340151417]])
diff_vb_mean = vb_mean - nisar_vb_mean
diff_vb_var = vb_var - nisar_vb_var
logging.info('Nisar\'s VB update had mean difference: \n {}\n and var difference: \n {}\n'
.format(diff_vb_mean, diff_vb_var))
    vb_mean, vb_var, _ = vb.vbis_update(measurement, sm, prior,
                                        init_mean=init_mean,
                                        init_var=init_var,
                                        init_alpha=init_alpha,
                                        init_xi=init_xi)
vb_posterior = GaussianMixture(1, vb_mean, vb_var)
# Define gridded space for graphing
min_x, max_x = -5, 5
min_y, max_y = -5, 5
res = 200
x_space, y_space = np.mgrid[min_x:max_x:1/res,
min_y:max_y:1/res]
pos = np.empty(x_space.shape + (2,))
    pos[:, :, 0] = x_space
    pos[:, :, 1] = y_space
levels_res = 30
max_prior = np.max(prior.pdf(pos))
prior_levels = np.linspace(0, max_prior, levels_res)
sm.probability()
max_lh = np.max(sm.probs)
lh_levels = np.linspace(0, max_lh, levels_res)
max_post = np.max(vb_posterior.pdf(pos))
post_levels = np.linspace(0, max_post, levels_res)
# Plot results
fig = plt.figure()
likelihood_label = 'Likelihood of \'{}\''.format(measurement)
prior_ax = plt.subplot2grid((2,32), (0,0), colspan=14)
prior_cax = plt.subplot2grid((2,32), (0,14), colspan=1)
prior_c = prior_ax.contourf(x_space, y_space, prior.pdf(pos), levels=prior_levels)
cbar = plt.colorbar(prior_c, cax=prior_cax)
prior_ax.set_xlabel('x1')
prior_ax.set_ylabel('x2')
prior_ax.set_title('Prior Distribution')
lh_ax = plt.subplot2grid((2,32), (0,17), colspan=14)
lh_cax = plt.subplot2grid((2,32), (0,31), colspan=1)
sm.classes[measurement].plot(ax=lh_ax, label=likelihood_label, plot_3D=False, levels=lh_levels)
# plt.colorbar(sm.probs, cax=lh_cax)
lh_ax.set_title(likelihood_label)
posterior_ax = plt.subplot2grid((2,32), (1,0), colspan=31)
posterior_cax = plt.subplot2grid((2,32), (1,31), colspan=1)
posterior_c = posterior_ax.contourf(x_space, y_space, vb_posterior.pdf(pos), levels=post_levels)
plt.colorbar(posterior_c, cax=posterior_cax)
posterior_ax.set_xlabel('x1')
posterior_ax.set_ylabel('x2')
posterior_ax.set_title('VB Posterior Distribution')
plt.show()
def gmm_sm_test(measurement='Outside'):
# Define prior
# prior = GaussianMixture(weights=[1, 4, 5],
# means=[[0.5, 1.3], # GM1 mean
# [-0.7, -0.6], # GM2 mean
# [0.2, -3], # GM3 mean
# ],
# covariances=[[[0.4, 0.3], # GM1 mean
# [0.3, 0.4]
# ],
# [[0.3, 0.1], # GM2 mean
# [0.1, 0.3]
# ],
# [[0.5, 0.4], # GM3 mean
# [0.4, 0.5]],
# ])
prior = GaussianMixture(weights=[1, 1, 1, 1, 1],
means=[[-2, -4], # GM1 mean
[-1, -2], # GM2 mean
[0, 0], # GM3 mean
[1, -2], # GM4 mean
[2, -4], # GM5 mean
],
covariances=[[[0.1, 0], # GM1 mean
[0, 0.1]
],
[[0.2, 0], # GM2 mean
[0, 0.2]
],
[[0.3, 0], # GM3 mean
[0, 0.3]
],
[[0.2, 0], # GM4 mean
[0, 0.2]
],
[[0.1, 0], # GM5 mean
[0, 0.1]],
])
# prior = GaussianMixture(weights=[1],
# means=[[-2, -4], # GM1 mean
# ],
# covariances=[[[0.1, 0], # GM1 mean
# [0, 0.1]
# ],
# ])
# Define sensor likelihood
brm = range_model()
# Do a VBIS update
logging.info('Starting VB update...')
vb = VariationalBayes()
mu_hat, var_hat, beta_hat = vb.update(measurement, brm, prior, use_LWIS=True)
vbis_posterior = GaussianMixture(weights=beta_hat, means=mu_hat, covariances=var_hat)
# Define gridded space for graphing
min_x, max_x = -5, 5
min_y, max_y = -5, 5
res = 100
x_space, y_space = np.mgrid[min_x:max_x:1/res,
min_y:max_y:1/res]
pos = np.empty(x_space.shape + (2,))
    pos[:, :, 0] = x_space
    pos[:, :, 1] = y_space
levels_res = 50
max_prior = np.max(prior.pdf(pos))
prior_levels = np.linspace(0, max_prior, levels_res)
brm.probability()
max_lh = np.max(brm.probs)
lh_levels = np.linspace(0, max_lh, levels_res)
max_post = np.max(vbis_posterior.pdf(pos))
post_levels = np.linspace(0, max_post, levels_res)
# Plot results
fig = plt.figure()
likelihood_label = 'Likelihood of \'{}\''.format(measurement)
prior_ax = plt.subplot2grid((2,32), (0,0), colspan=14)
prior_cax = plt.subplot2grid((2,32), (0,14), colspan=1)
prior_c = prior_ax.contourf(x_space, y_space, prior.pdf(pos), levels=prior_levels)
cbar = plt.colorbar(prior_c, cax=prior_cax)
prior_ax.set_xlabel('x1')
prior_ax.set_ylabel('x2')
prior_ax.set_title('Prior Distribution')
lh_ax = plt.subplot2grid((2,32), (0,17), colspan=14)
lh_cax = plt.subplot2grid((2,32), (0,31), colspan=1)
brm.classes[measurement].plot(ax=lh_ax, label=likelihood_label, ls='--', levels=lh_levels, show_plot=False, plot_3D=False)
# plt.colorbar(sm.probs, cax=lh_cax)
lh_ax.set_title(likelihood_label)
posterior_ax = plt.subplot2grid((2,32), (1,0), colspan=31)
posterior_cax = plt.subplot2grid((2,32), (1,31), colspan=1)
posterior_c = posterior_ax.contourf(x_space, y_space, vbis_posterior.pdf(pos), levels=post_levels)
plt.colorbar(posterior_c, cax=posterior_cax)
posterior_ax.set_xlabel('x1')
posterior_ax.set_ylabel('x2')
posterior_ax.set_title('VBIS Posterior Distribution')
logging.info('Prior Weights: \n {} \n Means: \n {} \n Variances: \n {} \n'.format(prior.weights,prior.means,prior.covariances))
logging.info('Posterior Weights: \n {} \n Means: \n {} \n Variances: \n {} \n'.format(vbis_posterior.weights,vbis_posterior.means,vbis_posterior.covariances))
plt.show()
def compare_to_matlab(measurement='Near'):
prior = GaussianMixture(weights=[1, 1, 1, 1, 1],
means=[[-2, -4], # GM1 mean
[-1, -2], # GM2 mean
[0, 0], # GM3 mean
[1, -2], # GM4 mean
[2, -4], # GM5 mean
],
covariances=[[[0.1, 0], # GM1 mean
[0, 0.1]
],
[[0.2, 0], # GM2 mean
[0, 0.2]
],
[[0.3, 0], # GM3 mean
[0, 0.3]
],
[[0.2, 0], # GM4 mean
[0, 0.2]
],
[[0.1, 0], # GM5 mean
[0, 0.1]],
])
# prior = GaussianMixture(weights=[1],
# means=[[-2, -4], # GM1 mean
# ],
# covariances=[[[0.1, 0], # GM1 mean
# [0, 0.1]
# ],
# ])
# Define sensor likelihood
brm = range_model()
    file_ = open('/Users/nick/Downloads/VBIS GM Fusion/nick_output.csv', 'w')
for i in range(30):
# Do a VBIS update
logging.info('Starting VB update...')
vb = VariationalBayes()
mu_hat, var_hat, beta_hat = vb.update(measurement, brm, prior)
# Flatten values
flat = np.hstack((beta_hat, mu_hat.flatten(), var_hat.flatten()))
# Save Flattened values
np.savetxt(file_, np.atleast_2d(flat), delimiter=',')
file_.close()
def camera_test(num_std=1, time_interval=1):
# prior = fleming_prior()
# prior = uniform_prior()
# prior = GaussianMixture(1, np.zeros(2), np.eye(2))
prior = GaussianMixture([1, 1, 1],
np.array([[-7, 0],
[-3, 0],
[1,0],
]),
np.eye(2)[None,:].repeat(3, axis=0)
)
bounds = [-12.5, -3.5, 2.5, 3.5]
min_view_dist = 0.3 # [m]
max_view_dist = 1.0 # [m]
detection_model = camera_model_2D(min_view_dist, max_view_dist)
trajectory = np.zeros((20,2))
ls = np.linspace(-10, 3, 20)
trajectory = np.hstack((ls[:, None], trajectory))
class camera_tester(object):
"""docstring for merged_gm"""
def __init__(self, prior, detection_model, trajectory, num_std=1, bounds=None):
self.fig = plt.figure(figsize=(16,8))
self.gm = prior
self.detection_model = detection_model
self.trajectory = itertools.cycle(trajectory)
self.vb = VariationalBayes()
self.num_std = num_std
if bounds is None:
self.bounds = [-5, -5, 5, 5]
else:
self.bounds = bounds
def update(self,i=0):
self.camera_pose = next(self.trajectory)
logging.info('Moving to pose {}.'.format(self.camera_pose))
self.detection_model.move(self.camera_pose)
# Do a VBIS update
mu, sigma, beta = self.vb.update(measurement='No Detection',
likelihood=detection_model,
prior=self.gm,
use_LWIS=True,
poly=detection_model.poly,
num_std=self.num_std
)
self.gm = GaussianMixture(weights=beta, means=mu, covariances=sigma)
# Log what's going on
logging.info(self.gm)
logging.info('Weight sum: {}'.format(beta.sum()))
self.remove()
self.plot()
def plot(self):
levels_res = 50
self.levels = np.linspace(0, np.max(self.gm.pdf(self.pos)), levels_res)
self.contourf = self.ax.contourf(self.xx, self.yy,
self.gm.pdf(self.pos),
levels=self.levels,
cmap=plt.get_cmap('jet')
)
# Plot camera
self.cam_patch = PolygonPatch(self.detection_model.poly, facecolor='none',
linewidth=2, edgecolor='white')
self.ax.add_patch(self.cam_patch)
# Plot ellipses
self.ellipse_patches = self.gm.plot_ellipses(poly=self.detection_model.poly)
def plot_setup(self):
# Define gridded space for graphing
min_x, max_x = self.bounds[0], self.bounds[2]
min_y, max_y = self.bounds[1], self.bounds[3]
res = 30
self.xx, self.yy = np.mgrid[min_x:max_x:1/res,
min_y:max_y:1/res]
pos = np.empty(self.xx.shape + (2,))
            pos[:, :, 0] = self.xx
            pos[:, :, 1] = self.yy
self.pos = pos
# Plot setup
self.ax = self.fig.add_subplot(111)
self.ax.set_title('VBIS with camera detection test')
plt.axis('scaled')
self.ax.set_xlim([min_x, max_x])
self.ax.set_ylim([min_y, max_y])
levels_res = 50
self.levels = np.linspace(0, np.max(self.gm.pdf(self.pos)), levels_res)
cax = self.contourf = self.ax.contourf(self.xx, self.yy,
self.gm.pdf(self.pos),
levels=self.levels,
cmap=plt.get_cmap('jet')
)
self.fig.colorbar(cax)
def remove(self):
if hasattr(self, 'cam_patch'):
self.cam_patch.remove()
del self.cam_patch
if hasattr(self, 'ellipse_patches'):
for patch in self.ellipse_patches:
patch.remove()
del self.ellipse_patches
if hasattr(self,'contourf'):
for collection in self.contourf.collections:
collection.remove()
del self.contourf
gm = camera_tester(prior, detection_model, trajectory, num_std, bounds)
logging.info('Initial GM:')
logging.info(prior)
ani = animation.FuncAnimation(gm.fig, gm.update,
interval=time_interval,
repeat=True,
blit=False,
init_func=gm.plot_setup
)
plt.show()
if __name__ == '__main__':
logger_format = '[%(levelname)-7s] %(funcName)-30s %(message)s'
logging.basicConfig(format=logger_format, level=logging.DEBUG)
np.set_printoptions(precision=10, suppress=True)
# comparison_1d()
# comparison_2d()
# gmm_sm_test('Near')
# compare_to_matlab()
camera_test(num_std=1, time_interval=1000) #[ms]
|
COHRINT/cops_and_robots
|
src/cops_and_robots/fusion/variational_bayes.py
|
Python
|
apache-2.0
| 40,903
|
[
"Gaussian"
] |
93ac0a38fd61952b2bfabf658007dfcf1ec4cd55a8ccc007d145950e2505df14
|
import numpy as np
# Note: this script is intended to be run through the Psi4 driver, which
# injects the `psi4`, `core`, and `print_out` names used below.
np.set_printoptions(precision=14)
npoints = 5
keys = ["V", "V_RHO_A", "V_RHO_B", "V_GAMMA_AA", "V_GAMMA_AB", "V_GAMMA_BB"]
funcs = [
["S_X", "XC_LDA_X"],
["B88_X", "XC_GGA_X_B88"],
["B86B_X", "XC_GGA_X_B86_MGC"],
["PW86_X", "XC_GGA_X_PW86"],
["PBE_X", "XC_GGA_X_PBE"],
#["RPBE_X", "XC_GGA_X_RPBE"],
#["SOGGA_X", "XC_GGA_X_SOGGA"],
["PW91_X", "XC_GGA_X_PW91"],
["FT97B_X", "XC_GGA_X_FT97_B"],
["PW92_C", "XC_LDA_C_PW"],
#["B_C"
#["M_C"
["LYP_C", "XC_GGA_C_LYP"],
["PZ81_C", "XC_LDA_C_PZ"],
["P86_C", "XC_GGA_C_P86"],
["PW91_C", "XC_GGA_C_PW91"],
["PBE_C", "XC_GGA_C_PBE"],
["FT97_C", "XC_GGA_C_FT97"],
["VWN3_C", "XC_LDA_C_VWN_3"],
["VWN5_C", "XC_LDA_C_VWN"],
["PW92A_C", "XC_LDA_C_PW_MOD"]]
#keys = ["V", "V_RHO_A", "V_GAMMA_AA", "V_GAMMA_AB", "V_GAMMA_BB", "V_RHO_B"]
rho_a = psi4.core.Vector.from_array(np.linspace(0.5, 0.99, npoints))
rho_b = psi4.core.Vector.from_array(np.linspace(0.5, 0.99, npoints))
sigma = psi4.core.Vector.from_array(np.ones((npoints)) * 0.3)
zeros = psi4.core.Vector.from_array(np.zeros((npoints)))
def build_in():
inp = {
'RHO_A' : rho_a,
'RHO_B' : rho_b,
'GAMMA_AA' : sigma,
'GAMMA_AB' : sigma,
'GAMMA_BB' : sigma,
}
return inp
def build_out():
ret = {}
for k in keys:
ret[k] = psi4.core.Vector(npoints)
return ret
for psi_func_name, xc_func_name in funcs:
print("Building functional %s/%s" % (psi_func_name, xc_func_name))
psi_fun = core.Functional.build_base(psi_func_name)
if "GGA" in xc_func_name:
psi_fun.set_gga(True)
print_out("Psi4 functional\n")
psi_fun.print_out()
xc_fun = core.Functional.build_base(xc_func_name)
print_out("XC functional\n")
xc_fun.print_out()
psi_out = build_out()
psi_inp = build_in()
psi_fun.compute_functional(psi_inp, psi_out, npoints, 1, 1.0)
#psi_out["V_RHO_A"].np[:] *= 2
# print("Called psi fun\n")
xc_out = build_out()
xc_inp = build_in()
#xc_inp["GAMMA_AA"].np[:] *= 3
# print("Called XC fun\n")
xc_fun.compute_functional(xc_inp, xc_out, npoints, 1, 1.0)
#xc_out["V"].np[:] *= (rho_b.np[:] + rho_a.np[:])
# print('Here')
for k in keys:
if not np.allclose(psi_out[k].np, xc_out[k].np, atol=1.e-5):
print(" Allclose failed for key %s" % k)
        print(psi_out[k].np)
        print(xc_out[k].np)
        print(np.linalg.norm(psi_out[k].np - xc_out[k].np))
raise Exception("Test failed")
|
rmcgibbo/psi4public
|
tests/libxc/devl/point_test.py
|
Python
|
lgpl-3.0
| 2,508
|
[
"Psi4"
] |
f794d90335d5dbef0e437199b56b67f1ae9738d0bb1d6d1ece43d4b572dab796
|
# -*- coding: utf-8 -*-
# Simple GUI example
from javax.swing import *
from java.awt import *
import optparse
import sys
## does summation of echoes using external python script
import os
import os.path
import subprocess
"""
import argparse
parser = argparse.ArgumentParser(description='Add echoes in a qcpmg bruker experiment')
parser.add_argument('-l','--lb',type=float, help='Lorentzian broadening applied to the decaying echo',default=0)
parser.add_argument('-g','--gb',type=float, help='Gaussian broadening applied to each echo',default=0)
parser.add_argument('-n',type=int, help='Number of echo to sum')
parser.add_argument('-c',type=float, help='qcpmg cycle in us')
parser.add_argument('infile',help='Full path of the dataset to process')
"""
import JTutils
dataset = CURDATA()  # CURDATA, GETPAR, GETPARSTAT, etc. are provided by the TopSpin Jython environment
N = str(1+int(GETPARSTAT("L 22")))
LB = GETPAR("LB")
GB = GETPAR("USERP1")
slope = GETPAR("USERP2")
cycle = float(GETPARSTAT("P 60"))
if cycle < 1:  # P60 likely did not store the cycle time, so fall back to the historic calculation
# historic qcpmg.jt cycle calculation
D3 = float(GETPARSTAT("D 3"))*1e6
D6 = float(GETPARSTAT("D 6"))*1e6
P4 = float(GETPARSTAT("P 4"))
cycle = 2*(D3+D6)+P4
cycle = str(cycle)
print(cycle)
fulldataPATH = JTutils.fullpath(dataset)
def canceled(event):
frame0.dispose()
def validated(event):
(GB, LB, slope, N, cycle) = [JTFgb.getText(), JTFlb.getText(), JTFslope.getText(), JTFn.getText(), JTFcycle.getText()]
opt_args = " -g %s -l %s -n %s -c %s -s %s" % (GB, LB, N, cycle, slope)
if echoB.isSelected():
opt_args += " -o "
if aechoB.isSelected():
opt_args += " -e "
JTutils.run_CpyBin_script('qcpmgadd2D_.py', opt_args.split()+[fulldataPATH])
frame0.dispose()
EXEC_PYSCRIPT("RE_PATH('%s')" % (fulldataPATH, ))
#PUTPAR("LB",LB)
#PUTPAR("USERP1",GB)
"""
JLabel("GB:")
LB:
slope:
cycle:
buttonGroup : odd/even/both
button_OK
button_CANCEL
button_HELP
"""
# defined a frame with 2 buttons
Lgb = JLabel("GB", SwingConstants.RIGHT)
JTFgb = JTextField(GB)
Llb = JLabel("LB", SwingConstants.RIGHT)
JTFlb = JTextField(LB)
Ln = JLabel("N", SwingConstants.RIGHT)
JTFn = JTextField(N)
Lslope = JLabel("Slope", SwingConstants.RIGHT)
JTFslope = JTextField(slope)
Lcycle = JLabel("Cycle", SwingConstants.RIGHT)
JTFcycle = JTextField(cycle)
echoB = JRadioButton("sum odd echoes")
aechoB = JRadioButton("sum even echoes")
bechoB = JRadioButton("sum all echoes")
button1 = JButton('OK', actionPerformed=validated)
button2 = JButton('Cancel', actionPerformed=canceled)
# create window with title
frame0 = JFrame('TopSpin / Python GUI Example')
# set window size x, y
frame0.setSize(500, 300)
frame0.setLayout(GridLayout(0,1))
frame1 = JPanel(GridLayout(0,2))
frame0.add(frame1)
frame2 = JPanel()
frame0.add(frame2)
frame3 = JPanel()
frame0.add(frame3)
grpB = ButtonGroup()
grpB.add(echoB)
grpB.add(aechoB)
grpB.add(bechoB)
# layout manager for horizontal alignment
frame1.add(Lgb)
frame1.add(JTFgb)
frame1.add(Llb)
frame1.add(JTFlb)
frame1.add(Lcycle)
frame1.add(JTFcycle)
frame1.add(Ln)
frame1.add(JTFn)
frame1.add(Lslope)
frame1.add(JTFslope)
frame2.add(echoB)
frame2.add(aechoB)
frame2.add(bechoB)
frame3.add(button1)
frame3.add(button2)
frame0.setDefaultCloseOperation(WindowConstants.DISPOSE_ON_CLOSE)
frame0.setVisible(True)
|
jtrebosc/JTutils
|
TSpy/qcpmgadd2DWin.py
|
Python
|
bsd-3-clause
| 3,320
|
[
"Gaussian"
] |
3e2fb955ec60dd53b3ae15b50ad6fe764fae4c027bb39327ed8d09251eca7f72
|
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2014-2017 The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import bpy
from bpy_extras.io_utils import ExportHelper
from . import project_file_writer
from . import util
class ExportAppleseedScene(bpy.types.Operator, ExportHelper):
"""
Export the scene to an appleseed project on disk.
"""
bl_idname = "appleseed.export_scene"
bl_label = "Export appleseed Scene"
filename_ext = ".appleseed"
filter_glob = bpy.props.StringProperty(default="*.appleseed", options={'HIDDEN'})
@classmethod
def poll(cls, context):
renderer = context.scene.render
return renderer.engine == 'APPLESEED_RENDER'
def execute(self, context):
exporter = project_file_writer.Exporter()
exporter.export(context.scene, util.realpath(self.filepath))
return {'FINISHED'}
def menu_func_export_scene(self, context):
self.layout.operator(ExportAppleseedScene.bl_idname, text="appleseed (.appleseed)")
def register():
bpy.utils.register_class(ExportAppleseedScene)
bpy.types.INFO_MT_file_export.append(menu_func_export_scene)
def unregister():
bpy.utils.unregister_class(ExportAppleseedScene)
bpy.types.INFO_MT_file_export.remove(menu_func_export_scene)
|
jasperges/blenderseed
|
export.py
|
Python
|
mit
| 2,457
|
[
"VisIt"
] |
e888d19ef05d9b3e422f3a9e07f6667a22343f15f7d7055b2294756804ec2aef
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Satpy developers
#
# This file is part of satpy.
#
# satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# satpy. If not, see <http://www.gnu.org/licenses/>.
"""VIIRS Active Fires Tests.
This module implements tests for VIIRS Active Fires NetCDF and ASCII file
readers.
"""
import io
import os
import unittest
from unittest import mock
import dask.dataframe as dd
import numpy as np
import pandas as pd
from satpy.readers.file_handlers import BaseFileHandler
from satpy.tests.reader_tests.test_netcdf_utils import FakeNetCDF4FileHandler
from satpy.tests.utils import convert_file_content_to_data_array
DEFAULT_FILE_SHAPE = (1, 100)
DEFAULT_LATLON_FILE_DTYPE = np.float32
DEFAULT_LATLON_FILE_DATA = np.arange(start=43, stop=45, step=0.02,
dtype=DEFAULT_LATLON_FILE_DTYPE).reshape(DEFAULT_FILE_SHAPE)
DEFAULT_DETECTION_FILE_DTYPE = np.uint8
DEFAULT_DETECTION_FILE_DATA = np.arange(start=60, stop=100, step=0.4,
dtype=DEFAULT_DETECTION_FILE_DTYPE).reshape(DEFAULT_FILE_SHAPE)
DEFAULT_M13_FILE_DTYPE = np.float32
DEFAULT_M13_FILE_DATA = np.arange(start=300, stop=340, step=0.4,
dtype=DEFAULT_M13_FILE_DTYPE).reshape(DEFAULT_FILE_SHAPE)
DEFAULT_POWER_FILE_DTYPE = np.float32
DEFAULT_POWER_FILE_DATA = np.arange(start=1, stop=25, step=0.24,
dtype=DEFAULT_POWER_FILE_DTYPE).reshape(DEFAULT_FILE_SHAPE)
class FakeModFiresNetCDF4FileHandler(FakeNetCDF4FileHandler):
"""Swap in CDF4 file handler."""
def get_test_content(self, filename, filename_info, filename_type):
"""Mimic reader input file content."""
file_content = {}
file_content['/attr/data_id'] = "AFMOD"
file_content['satellite_name'] = "npp"
file_content['sensor'] = 'VIIRS'
file_content['Fire Pixels/FP_latitude'] = DEFAULT_LATLON_FILE_DATA
file_content['Fire Pixels/FP_longitude'] = DEFAULT_LATLON_FILE_DATA
file_content['Fire Pixels/FP_power'] = DEFAULT_POWER_FILE_DATA
file_content['Fire Pixels/FP_T13'] = DEFAULT_M13_FILE_DATA
file_content['Fire Pixels/FP_T13/attr/units'] = 'kelvins'
file_content['Fire Pixels/FP_confidence'] = DEFAULT_DETECTION_FILE_DATA
file_content['Fire Pixels/attr/units'] = 'none'
file_content['Fire Pixels/shape'] = DEFAULT_FILE_SHAPE
attrs = ('FP_latitude', 'FP_longitude', 'FP_T13', 'FP_confidence')
convert_file_content_to_data_array(
file_content, attrs=attrs,
dims=('z', 'fakeDim0', 'fakeDim1'))
return file_content
class FakeImgFiresNetCDF4FileHandler(FakeNetCDF4FileHandler):
"""Swap in CDF4 file handler."""
def get_test_content(self, filename, filename_info, filename_type):
"""Mimic reader input file content."""
file_content = {}
file_content['/attr/data_id'] = "AFIMG"
file_content['satellite_name'] = "npp"
file_content['sensor'] = 'VIIRS'
file_content['FP_latitude'] = DEFAULT_LATLON_FILE_DATA
file_content['FP_longitude'] = DEFAULT_LATLON_FILE_DATA
file_content['FP_power'] = DEFAULT_POWER_FILE_DATA
file_content['FP_T4'] = DEFAULT_M13_FILE_DATA
file_content['FP_T4/attr/units'] = 'kelvins'
file_content['FP_confidence'] = DEFAULT_DETECTION_FILE_DATA
        attrs = ('FP_latitude', 'FP_longitude', 'FP_T4', 'FP_confidence')
convert_file_content_to_data_array(
file_content, attrs=attrs,
dims=('z', 'fakeDim0', 'fakeDim1'))
return file_content
class FakeModFiresTextFileHandler(BaseFileHandler):
"""Fake file handler for text files at moderate resolution."""
def __init__(self, filename, filename_info, filetype_info, **kwargs):
"""Get fake file content from 'get_test_content'."""
super(FakeModFiresTextFileHandler, self).__init__(filename, filename_info, filetype_info)
self.file_content = self.get_test_content()
platform_key = {"NPP": "Suomi-NPP", "J01": "NOAA-20", "J02": "NOAA-21"}
self.platform_name = platform_key.get(self.filename_info['satellite_name'].upper(), "unknown")
def get_test_content(self):
"""Create fake test file content."""
fake_file = io.StringIO(u'''\n\n\n\n\n\n\n\n\n\n\n\n\n\n
24.64015007, -107.57017517, 317.38290405, 0.75, 0.75, 40, 4.28618050
25.90660477, -100.06127167, 331.17962646, 0.75, 0.75, 81, 20.61096764''')
return dd.from_pandas(pd.read_csv(fake_file, skiprows=15, header=None,
names=["latitude", "longitude",
"T13", "Along-scan", "Along-track",
"confidence_pct",
"power"]), chunksize=1)
class FakeImgFiresTextFileHandler(BaseFileHandler):
"""Fake file handler for text files at image resolution."""
def __init__(self, filename, filename_info, filetype_info, **kwargs):
"""Get fake file content from 'get_test_content'."""
super(FakeImgFiresTextFileHandler, self).__init__(filename, filename_info, filetype_info)
self.file_content = self.get_test_content()
def get_test_content(self):
"""Create fake test file content."""
fake_file = io.StringIO(u'''\n\n\n\n\n\n\n\n\n\n\n\n\n\n
24.64015007, -107.57017517, 317.38290405, 0.75, 0.75, 40, 4.28618050
25.90660477, -100.06127167, 331.17962646, 0.75, 0.75, 81, 20.61096764''')
platform_key = {"NPP": "Suomi-NPP", "J01": "NOAA-20", "J02": "NOAA-21"}
self.platform_name = platform_key.get(self.filename_info['satellite_name'].upper(), "unknown")
return dd.from_pandas(pd.read_csv(fake_file, skiprows=15, header=None,
names=["latitude", "longitude",
"T4", "Along-scan", "Along-track",
"confidence_cat",
"power"]), chunksize=1)
class TestModVIIRSActiveFiresNetCDF4(unittest.TestCase):
"""Test VIIRS Fires Reader."""
yaml_file = 'viirs_edr_active_fires.yaml'
def setUp(self):
"""Wrap CDF4 file handler with own fake file handler."""
from satpy._config import config_search_paths
from satpy.readers.viirs_edr_active_fires import VIIRSActiveFiresFileHandler
self.reader_configs = config_search_paths(os.path.join('readers', self.yaml_file))
self.p = mock.patch.object(VIIRSActiveFiresFileHandler, '__bases__', (FakeModFiresNetCDF4FileHandler,))
self.fake_handler = self.p.start()
self.p.is_local = True
def tearDown(self):
"""Stop wrapping the CDF4 file handler."""
self.p.stop()
def test_init(self):
"""Test basic init with no extra parameters."""
from satpy.readers import load_reader
r = load_reader(self.reader_configs)
loadables = r.select_files_from_pathnames([
'AFMOD_j02_d20180829_t2015451_e2017093_b35434_c20180829210527716708_cspp_dev.nc'
])
self.assertEqual(len(loadables), 1)
r.create_filehandlers(loadables)
self.assertTrue(r.file_handlers)
def test_load_dataset(self):
"""Test loading all datasets."""
from satpy.readers import load_reader
r = load_reader(self.reader_configs)
loadables = r.select_files_from_pathnames([
'AFMOD_j02_d20180829_t2015451_e2017093_b35434_c20180829210527716708_cspp_dev.nc'
])
r.create_filehandlers(loadables)
datasets = r.load(['confidence_pct'])
self.assertEqual(len(datasets), 1)
for v in datasets.values():
self.assertEqual(v.attrs['units'], '%')
self.assertEqual(v.attrs['_FillValue'], 255)
self.assertTrue(np.issubdtype(v.dtype, DEFAULT_DETECTION_FILE_DTYPE))
datasets = r.load(['T13'])
self.assertEqual(len(datasets), 1)
for v in datasets.values():
self.assertEqual(v.attrs['units'], 'K')
datasets = r.load(['power'])
self.assertEqual(len(datasets), 1)
for v in datasets.values():
self.assertEqual(v.attrs['units'], 'MW')
self.assertEqual(v.attrs['platform_name'], 'NOAA-21')
self.assertEqual(v.attrs['sensor'], 'VIIRS')
class TestImgVIIRSActiveFiresNetCDF4(unittest.TestCase):
"""Test VIIRS Fires Reader."""
yaml_file = 'viirs_edr_active_fires.yaml'
def setUp(self):
"""Wrap CDF4 file handler with own fake file handler."""
from satpy._config import config_search_paths
from satpy.readers.viirs_edr_active_fires import VIIRSActiveFiresFileHandler
self.reader_configs = config_search_paths(os.path.join('readers', self.yaml_file))
self.p = mock.patch.object(VIIRSActiveFiresFileHandler, '__bases__', (FakeImgFiresNetCDF4FileHandler,))
self.fake_handler = self.p.start()
self.p.is_local = True
def tearDown(self):
"""Stop wrapping the CDF4 file handler."""
self.p.stop()
def test_init(self):
"""Test basic init with no extra parameters."""
from satpy.readers import load_reader
r = load_reader(self.reader_configs)
loadables = r.select_files_from_pathnames([
'AFIMG_npp_d20180829_t2015451_e2017093_b35434_c20180829210527716708_cspp_dev.nc'
])
self.assertEqual(len(loadables), 1)
r.create_filehandlers(loadables)
self.assertTrue(r.file_handlers)
def test_load_dataset(self):
"""Test loading all datasets."""
from satpy.readers import load_reader
r = load_reader(self.reader_configs)
loadables = r.select_files_from_pathnames([
'AFIMG_npp_d20180829_t2015451_e2017093_b35434_c20180829210527716708_cspp_dev.nc'
])
r.create_filehandlers(loadables)
datasets = r.load(['confidence_cat'])
self.assertEqual(len(datasets), 1)
for v in datasets.values():
self.assertEqual(v.attrs['units'], '1')
self.assertEqual(v.attrs['flag_meanings'], ['low', 'medium', 'high'])
self.assertEqual(v.attrs['flag_values'], [7, 8, 9])
datasets = r.load(['T4'])
self.assertEqual(len(datasets), 1)
for v in datasets.values():
self.assertEqual(v.attrs['units'], 'K')
datasets = r.load(['power'])
self.assertEqual(len(datasets), 1)
for v in datasets.values():
self.assertEqual(v.attrs['units'], 'MW')
self.assertEqual(v.attrs['platform_name'], 'Suomi-NPP')
self.assertEqual(v.attrs['sensor'], 'VIIRS')
@mock.patch('satpy.readers.viirs_edr_active_fires.dd.read_csv')
class TestModVIIRSActiveFiresText(unittest.TestCase):
"""Test VIIRS Fires Reader."""
yaml_file = 'viirs_edr_active_fires.yaml'
def setUp(self):
"""Wrap file handler with own fake file handler."""
from satpy._config import config_search_paths
from satpy.readers.viirs_edr_active_fires import VIIRSActiveFiresTextFileHandler
self.reader_configs = config_search_paths(os.path.join('readers', self.yaml_file))
self.p = mock.patch.object(VIIRSActiveFiresTextFileHandler, '__bases__', (FakeModFiresTextFileHandler,))
self.fake_handler = self.p.start()
self.p.is_local = True
def tearDown(self):
"""Stop wrapping the text file handler."""
self.p.stop()
def test_init(self, mock_obj):
"""Test basic init with no extra parameters."""
from satpy.readers import load_reader
r = load_reader(self.reader_configs)
loadables = r.select_files_from_pathnames([
'AFEDR_j01_d20180829_t2015451_e2017093_b35434_c20180829210527716708_cspp_dev.txt'
])
self.assertEqual(len(loadables), 1)
r.create_filehandlers(loadables)
self.assertTrue(r.file_handlers)
def test_load_dataset(self, csv_mock):
"""Test loading all datasets."""
from satpy.readers import load_reader
r = load_reader(self.reader_configs)
loadables = r.select_files_from_pathnames([
'AFEDR_j01_d20180829_t2015451_e2017093_b35434_c20180829210527716708_cspp_dev.txt'
])
r.create_filehandlers(loadables)
datasets = r.load(['confidence_pct'])
self.assertEqual(len(datasets), 1)
for v in datasets.values():
self.assertEqual(v.attrs['units'], '%')
datasets = r.load(['T13'])
self.assertEqual(len(datasets), 1)
for v in datasets.values():
self.assertEqual(v.attrs['units'], 'K')
datasets = r.load(['power'])
self.assertEqual(len(datasets), 1)
for v in datasets.values():
self.assertEqual(v.attrs['units'], 'MW')
self.assertEqual(v.attrs['platform_name'], 'NOAA-20')
self.assertEqual(v.attrs['sensor'], 'VIIRS')
@mock.patch('satpy.readers.viirs_edr_active_fires.dd.read_csv')
class TestImgVIIRSActiveFiresText(unittest.TestCase):
"""Test VIIRS Fires Reader."""
yaml_file = 'viirs_edr_active_fires.yaml'
def setUp(self):
"""Wrap file handler with own fake file handler."""
from satpy._config import config_search_paths
from satpy.readers.viirs_edr_active_fires import VIIRSActiveFiresTextFileHandler
self.reader_configs = config_search_paths(os.path.join('readers', self.yaml_file))
self.p = mock.patch.object(VIIRSActiveFiresTextFileHandler, '__bases__', (FakeImgFiresTextFileHandler,))
self.fake_handler = self.p.start()
self.p.is_local = True
def tearDown(self):
"""Stop wrapping the text file handler."""
self.p.stop()
def test_init(self, mock_obj):
"""Test basic init with no extra parameters."""
from satpy.readers import load_reader
r = load_reader(self.reader_configs)
loadables = r.select_files_from_pathnames([
'AFIMG_npp_d20180829_t2015451_e2017093_b35434_c20180829210527716708_cspp_dev.txt'
])
self.assertEqual(len(loadables), 1)
r.create_filehandlers(loadables)
self.assertTrue(r.file_handlers)
def test_load_dataset(self, mock_obj):
"""Test loading all datasets."""
from satpy.readers import load_reader
r = load_reader(self.reader_configs)
loadables = r.select_files_from_pathnames([
'AFIMG_npp_d20180829_t2015451_e2017093_b35434_c20180829210527716708_cspp_dev.txt'
])
r.create_filehandlers(loadables)
datasets = r.load(['confidence_cat'])
self.assertEqual(len(datasets), 1)
for v in datasets.values():
self.assertEqual(v.attrs['units'], '1')
self.assertEqual(v.attrs['flag_meanings'], ['low', 'medium', 'high'])
self.assertEqual(v.attrs['flag_values'], [7, 8, 9])
datasets = r.load(['T4'])
self.assertEqual(len(datasets), 1)
for v in datasets.values():
self.assertEqual(v.attrs['units'], 'K')
datasets = r.load(['power'])
self.assertEqual(len(datasets), 1)
for v in datasets.values():
self.assertEqual(v.attrs['units'], 'MW')
self.assertEqual(v.attrs['platform_name'], 'Suomi-NPP')
self.assertEqual(v.attrs['sensor'], 'VIIRS')
|
pytroll/satpy
|
satpy/tests/reader_tests/test_viirs_edr_active_fires.py
|
Python
|
gpl-3.0
| 16,145
|
[
"NetCDF"
] |
8de3ab9e2e8fcf367da7cefa128c6cc42f28f7a4c85eba226eacbe1d82e6a7fa
|
"""
Copyright (C) 2014, Jaguar Land Rover
This program is licensed under the terms and conditions of the
Mozilla Public License, version 2.0. The full text of the
Mozilla Public License is at https://www.mozilla.org/MPL/2.0/
Maintainer: Rudolf Streif (rstreif@jaguarlandrover.com)
Author: David Thiriez (david.thiriez@p3-group.com)
Log invoked services.
"""
import os, threading, base64, json
from urlparse import urlparse
import Queue
from rvijsonrpc import RVIJSONRPCServer
from dateutil import parser
import requests, pytz
import __init__
from __init__ import __RVI_LOGGER__ as rvi_logger
from servicehistory.tasks import send_service_invoked_by_guest
from server.utils import get_setting
from django.contrib.auth.models import User
from vehicles.models import Vehicle
from servicehistory.models import ServiceInvokedHistory
# globals
package_queue = Queue.Queue()
SERVER_NAME = "Log Invoked Service Server: "
transaction_id = 0
# Log Invoked Service Callback Server
class LogInvokedServicesServer(threading.Thread):
"""
RPC server thread responding to Remote callbacks from the RVI framework.
i.e. record service request that occur at the vehicle/RasPi
"""
def __init__(self, service_edge, callback_url, service_id):
self.service_edge = service_edge
self.service_id = service_id
self.callback_url = callback_url
threading.Thread.__init__(self)
url = urlparse(self.callback_url)
self.localServer = RVIJSONRPCServer(addr=((url.hostname, url.port)), logRequests=False)
self.register_services()
def register_services(self):
# register callback functions with RPC server
self.localServer.register_function(log_invoked_service, self.service_id + "/report/serviceinvoked")
# register services with RVI framework
result = self.service_edge.register_service(service = self.service_id+'/report/serviceinvoked',
network_address = self.callback_url)
rvi_logger.info(SERVER_NAME + 'Registration: %s', result['service'])
def run(self):
self.localServer.serve_forever()
def shutdown(self):
self.localServer.shutdown()
# Callback functions
def log_invoked_service(username, vehicleVIN, service, latitude, longitude, timestamp):
rvi_logger.info(SERVER_NAME + 'Create new remote request: \n'
'username: %s\n'
'vehicleVIN: %s\n'
'service: %s\n'
'latitude: %s\n'
'longitude: %s\n'
'timestamp: %s'
, username, vehicleVIN, service, latitude, longitude, timestamp)
t1 = threading.Thread(target=thread_log_invoked_service, args=(
username,
vehicleVIN,
service,
latitude,
longitude,
timestamp,
))
t1.start()
return {u'status': 0}
# Support (thread) functions
def thread_log_invoked_service(username, vehicleVIN, service, latitude, longitude, timestamp):
try:
serviceinvoked = validate_log_invoked_service(username, vehicleVIN, service, latitude, longitude, timestamp)
    except Exception:
        rvi_logger.exception(SERVER_NAME + 'Received data did not pass validation')
        return
    serviceinvoked.save()
rvi_logger.info(SERVER_NAME + 'Saved log of the following service invoked record: %s', serviceinvoked)
vehicle = Vehicle.objects.get(veh_vin = vehicleVIN)
owner_username = vehicle.list_account()
if owner_username != username:
send_service_invoked_by_guest(owner_username, username, vehicleVIN, service)
# Validation functions
def validate_log_invoked_service(username, vehicleVIN, service, latitude, longitude, timestamp):
try:
user = User.objects.get(username=username)
vehicle = Vehicle.objects.get(veh_vin=vehicleVIN)
service_timestamp = parser.parse(str(timestamp).replace('T', ' ').replace('Z', ' +0000'))
api_key = get_setting("GOOGLE_API_KEY")
if api_key:
address = reverse_geocode_googleapi(latitude, longitude, api_key)
else:
address = str(latitude)+', '+str(longitude)
except User.DoesNotExist:
rvi_logger.error(SERVER_NAME + 'username does not exist: %s', username)
raise
except Vehicle.DoesNotExist:
rvi_logger.error(SERVER_NAME + 'VIN does not exist: %s', vehicleVIN)
raise
except Exception as e:
rvi_logger.error(SERVER_NAME + 'Generic Error: %s', e)
raise
return ServiceInvokedHistory(
hist_user = user,
hist_service = service,
hist_latitude = latitude,
hist_longitude = longitude,
hist_address = address,
hist_vehicle = vehicle,
hist_timestamp = service_timestamp
)
# Support functions
def reverse_geocode_googleapi(latitude, longitude, api_key):
# Sensor set to True since GPS coords coming from the mobile app
sensor = 'true'
base = "https://maps.googleapis.com/maps/api/geocode/json?"
params = "latlng={lat},{lon}&sensor={sen}&key={key}".format(
lat=latitude,
lon=longitude,
sen=sensor,
key=api_key
)
url = "{base}{params}".format(base=base, params=params)
response = requests.get(url)
# rvi_logger.info('Google Detailed Response: %s', response.content)
data = response.json()
rvi_logger.info('Google JSON Response: %s', data)
return data['results'][0]['formatted_address']
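# For reference, the lookup above assumes the usual Geocoding API shape
# (values illustrative, not captured from a real call):
# {"status": "OK",
# "results": [{"formatted_address": "1600 Amphitheatre Pkwy, ...", ...}, ...]}
# A defensive caller could verify data['status'] == 'OK' and that 'results'
# is non-empty before indexing [0]; the code above assumes the happy path.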
|
dvthiriez/rvi_backend
|
server/loginvokedserviceserver.py
|
Python
|
mpl-2.0
| 5,510
|
[
"Jaguar"
] |
2fd3bdafeae981a4b70fcee745e08a56a22270690fc54124824ca0b17b4a6a1b
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
import pandas.util.testing as pdt
from numpy.random import normal
import pandas as pd
import scipy
import copy
from skbio.util import assert_data_frame_almost_equal
from skbio.stats.composition import (closure, multiplicative_replacement,
perturb, perturb_inv, power, inner,
clr, clr_inv, ilr, ilr_inv,
centralize, _holm_bonferroni, ancom)
class CompositionTests(TestCase):
def setUp(self):
# Compositional data
self.cdata1 = np.array([[2, 2, 6],
[4, 4, 2]])
self.cdata2 = np.array([2, 2, 6])
self.cdata3 = np.array([[1, 2, 3, 0, 5],
[1, 0, 0, 4, 5],
[1, 2, 3, 4, 5]])
self.cdata4 = np.array([1, 2, 3, 0, 5])
self.cdata5 = [[2, 2, 6], [4, 4, 2]]
self.cdata6 = [[1, 2, 3, 0, 5],
[1, 0, 0, 4, 5],
[1, 2, 3, 4, 5]]
self.cdata7 = [np.exp(1), 1, 1]
self.cdata8 = [np.exp(1), 1, 1, 1]
# Simplicial orthonormal basis obtained from Gram-Schmidt
self.ortho1 = [[0.44858053, 0.10905743, 0.22118102, 0.22118102],
[0.3379924, 0.3379924, 0.0993132, 0.22470201],
[0.3016453, 0.3016453, 0.3016453, 0.09506409]]
# Real data
self.rdata1 = [[0.70710678, -0.70710678, 0., 0.],
[0.40824829, 0.40824829, -0.81649658, 0.],
[0.28867513, 0.28867513, 0.28867513, -0.8660254]]
# Bad datasets
# negative count
self.bad1 = np.array([1, 2, -1])
# 3-D array (more than two dimensions)
self.bad2 = np.array([[[1, 2, 3, 0, 5]]])
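# Worked example of the closure operation exercised below: closure maps a
# composition onto the unit simplex by dividing through by its total, e.g.
# closure([2, 2, 6]) -> [2/10, 2/10, 6/10] = [.2, .2, .6]; rows of a 2-D
# input are normalised independently.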
def test_closure(self):
npt.assert_allclose(closure(self.cdata1),
np.array([[.2, .2, .6],
[.4, .4, .2]]))
npt.assert_allclose(closure(self.cdata2),
np.array([.2, .2, .6]))
npt.assert_allclose(closure(self.cdata5),
np.array([[.2, .2, .6],
[.4, .4, .2]]))
with self.assertRaises(ValueError):
closure(self.bad1)
with self.assertRaises(ValueError):
closure(self.bad2)
# make sure that inplace modification is not occurring
closure(self.cdata2)
npt.assert_allclose(self.cdata2, np.array([2, 2, 6]))
def test_closure_warning(self):
with self.assertRaises(ValueError):
closure([0., 0., 0.])
with self.assertRaises(ValueError):
closure([[0., 0., 0.],
[0., 5., 5.]])
def test_perturb(self):
pmat = perturb(closure(self.cdata1),
closure(np.array([1, 1, 1])))
npt.assert_allclose(pmat,
np.array([[.2, .2, .6],
[.4, .4, .2]]))
pmat = perturb(closure(self.cdata1),
closure(np.array([10, 10, 20])))
npt.assert_allclose(pmat,
np.array([[.125, .125, .75],
[1./3, 1./3, 1./3]]))
pmat = perturb(closure(self.cdata1),
closure(np.array([10, 10, 20])))
npt.assert_allclose(pmat,
np.array([[.125, .125, .75],
[1./3, 1./3, 1./3]]))
pmat = perturb(closure(self.cdata2),
closure([1, 2, 1]))
npt.assert_allclose(pmat, np.array([1./6, 2./6, 3./6]))
pmat = perturb(closure(self.cdata5),
closure(np.array([1, 1, 1])))
npt.assert_allclose(pmat,
np.array([[.2, .2, .6],
[.4, .4, .2]]))
with self.assertRaises(ValueError):
perturb(closure(self.cdata5), self.bad1)
# make sure that inplace modification is not occurring
perturb(self.cdata2, [1, 2, 3])
npt.assert_allclose(self.cdata2, np.array([2, 2, 6]))
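# Worked perturbation example: perturb is the simplex analogue of addition,
# i.e. componentwise multiplication followed by closure. For closure([2, 2, 6])
# = [.2, .2, .6] and closure([1, 2, 1]) = [.25, .5, .25]:
# [.2*.25, .2*.5, .6*.25] = [.05, .10, .15] -> closure -> [1/6, 2/6, 3/6],
# matching the expected value asserted above.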
def test_power(self):
pmat = power(closure(self.cdata1), 2)
npt.assert_allclose(pmat,
np.array([[.04/.44, .04/.44, .36/.44],
[.16/.36, .16/.36, .04/.36]]))
pmat = power(closure(self.cdata2), 2)
npt.assert_allclose(pmat, np.array([.04, .04, .36])/.44)
pmat = power(closure(self.cdata5), 2)
npt.assert_allclose(pmat,
np.array([[.04/.44, .04/.44, .36/.44],
[.16/.36, .16/.36, .04/.36]]))
with self.assertRaises(ValueError):
power(self.bad1, 2)
# make sure that inplace modification is not occurring
power(self.cdata2, 4)
npt.assert_allclose(self.cdata2, np.array([2, 2, 6]))
def test_perturb_inv(self):
pmat = perturb_inv(closure(self.cdata1),
closure([.1, .1, .1]))
imat = perturb(closure(self.cdata1),
closure([10, 10, 10]))
npt.assert_allclose(pmat, imat)
pmat = perturb_inv(closure(self.cdata1),
closure([1, 1, 1]))
npt.assert_allclose(pmat,
closure([[.2, .2, .6],
[.4, .4, .2]]))
pmat = perturb_inv(closure(self.cdata5),
closure([.1, .1, .1]))
imat = perturb(closure(self.cdata1), closure([10, 10, 10]))
npt.assert_allclose(pmat, imat)
with self.assertRaises(ValueError):
perturb_inv(closure(self.cdata1), self.bad1)
# make sure that inplace modification is not occurring
perturb_inv(self.cdata2, [1, 2, 3])
npt.assert_allclose(self.cdata2, np.array([2, 2, 6]))
def test_inner(self):
a = inner(self.cdata5, self.cdata5)
npt.assert_allclose(a, np.array([[0.80463264, -0.50766667],
[-0.50766667, 0.32030201]]))
b = inner(self.cdata7, self.cdata7)
npt.assert_allclose(b, 0.66666666666666663)
# Make sure that orthogonality holds
npt.assert_allclose(inner(self.ortho1, self.ortho1), np.identity(3),
rtol=1e-04, atol=1e-06)
with self.assertRaises(ValueError):
inner(self.cdata1, self.cdata8)
# make sure that inplace modification is not occurring
inner(self.cdata1, self.cdata1)
npt.assert_allclose(self.cdata1,
np.array([[2, 2, 6],
[4, 4, 2]]))
def test_multiplicative_replacement(self):
amat = multiplicative_replacement(closure(self.cdata3))
npt.assert_allclose(amat,
np.array([[0.087273, 0.174545, 0.261818,
0.04, 0.436364],
[0.092, 0.04, 0.04, 0.368, 0.46],
[0.066667, 0.133333, 0.2,
0.266667, 0.333333]]),
rtol=1e-5, atol=1e-5)
amat = multiplicative_replacement(closure(self.cdata4))
npt.assert_allclose(amat,
np.array([0.087273, 0.174545, 0.261818,
0.04, 0.436364]),
rtol=1e-5, atol=1e-5)
amat = multiplicative_replacement(closure(self.cdata6))
npt.assert_allclose(amat,
np.array([[0.087273, 0.174545, 0.261818,
0.04, 0.436364],
[0.092, 0.04, 0.04, 0.368, 0.46],
[0.066667, 0.133333, 0.2,
0.266667, 0.333333]]),
rtol=1e-5, atol=1e-5)
with self.assertRaises(ValueError):
multiplicative_replacement(self.bad1)
with self.assertRaises(ValueError):
multiplicative_replacement(self.bad2)
# make sure that inplace modification is not occurring
multiplicative_replacement(self.cdata4)
npt.assert_allclose(self.cdata4, np.array([1, 2, 3, 0, 5]))
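# Worked example for the zero replacement above (assuming the default
# delta = 1/D**2): closure([1, 2, 3, 0, 5]) has D = 5 parts, so delta = 0.04
# fills the single zero and the nonzero entries are rescaled by
# (1 - 1*0.04) = 0.96, e.g. 1/11 * 0.96 ~= 0.087273 as in exp above.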
def test_multiplicative_replacement_warning(self):
with self.assertRaises(ValueError):
multiplicative_replacement([0, 1, 2], delta=1)
def test_clr(self):
cmat = clr(closure(self.cdata1))
A = np.array([.2, .2, .6])
B = np.array([.4, .4, .2])
npt.assert_allclose(cmat,
[np.log(A / np.exp(np.log(A).mean())),
np.log(B / np.exp(np.log(B).mean()))])
cmat = clr(closure(self.cdata2))
A = np.array([.2, .2, .6])
npt.assert_allclose(cmat,
np.log(A / np.exp(np.log(A).mean())))
cmat = clr(closure(self.cdata5))
A = np.array([.2, .2, .6])
B = np.array([.4, .4, .2])
npt.assert_allclose(cmat,
[np.log(A / np.exp(np.log(A).mean())),
np.log(B / np.exp(np.log(B).mean()))])
with self.assertRaises(ValueError):
clr(self.bad1)
with self.assertRaises(ValueError):
clr(self.bad2)
# make sure that inplace modification is not occurring
clr(self.cdata2)
npt.assert_allclose(self.cdata2, np.array([2, 2, 6]))
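# For reference, the identity checked above is the centred log-ratio
# clr(x) = log(x / g(x)) with g(x) the geometric mean, which the assertions
# spell out as np.log(A / np.exp(np.log(A).mean())).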
def test_clr_inv(self):
npt.assert_allclose(clr_inv(self.rdata1), self.ortho1)
npt.assert_allclose(clr(clr_inv(self.rdata1)), self.rdata1)
# make sure that inplace modification is not occurring
clr_inv(self.rdata1)
npt.assert_allclose(self.rdata1,
np.array([[0.70710678, -0.70710678, 0., 0.],
[0.40824829, 0.40824829,
-0.81649658, 0.],
[0.28867513, 0.28867513,
0.28867513, -0.8660254]]))
def test_centralize(self):
cmat = centralize(closure(self.cdata1))
npt.assert_allclose(cmat,
np.array([[0.22474487, 0.22474487, 0.55051026],
[0.41523958, 0.41523958, 0.16952085]]))
cmat = centralize(closure(self.cdata5))
npt.assert_allclose(cmat,
np.array([[0.22474487, 0.22474487, 0.55051026],
[0.41523958, 0.41523958, 0.16952085]]))
with self.assertRaises(ValueError):
centralize(self.bad1)
with self.assertRaises(ValueError):
centralize(self.bad2)
# make sure that inplace modification is not occurring
centralize(self.cdata1)
npt.assert_allclose(self.cdata1,
np.array([[2, 2, 6],
[4, 4, 2]]))
def test_ilr(self):
mat = closure(self.cdata7)
npt.assert_array_almost_equal(ilr(mat),
np.array([0.70710678, 0.40824829]))
# Should give same result as inner
npt.assert_allclose(ilr(self.ortho1), np.identity(3),
rtol=1e-04, atol=1e-06)
with self.assertRaises(ValueError):
ilr(self.cdata1, basis=self.cdata1)
# make sure that inplace modification is not occurring
ilr(self.cdata1)
npt.assert_allclose(self.cdata1,
np.array([[2, 2, 6],
[4, 4, 2]]))
def test_ilr_basis(self):
table = np.array([[1., 10.],
[1.14141414, 9.90909091],
[1.28282828, 9.81818182],
[1.42424242, 9.72727273],
[1.56565657, 9.63636364]])
basis = np.array([[0.80442968, 0.19557032]])
res = ilr(table, basis=basis)
exp = np.array([np.log(1/10)*np.sqrt(1/2),
np.log(1.14141414 / 9.90909091)*np.sqrt(1/2),
np.log(1.28282828 / 9.81818182)*np.sqrt(1/2),
np.log(1.42424242 / 9.72727273)*np.sqrt(1/2),
np.log(1.56565657 / 9.63636364)*np.sqrt(1/2)])
npt.assert_allclose(res, exp)
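# Worked form of the expected values above: with a single two-part balance,
# ilr reduces to sqrt(1/2) * log(x1 / x2) per row, which is exactly what the
# exp array spells out.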
def test_ilr_basis_one_dimension_error(self):
table = np.array([[1., 10.],
[1.14141414, 9.90909091],
[1.28282828, 9.81818182],
[1.42424242, 9.72727273],
[1.56565657, 9.63636364]])
basis = np.array([0.80442968, 0.19557032])
with self.assertRaises(ValueError):
ilr(table, basis=basis)
def test_ilr_inv(self):
mat = closure(self.cdata7)
npt.assert_array_almost_equal(ilr_inv(ilr(mat)), mat)
npt.assert_allclose(ilr_inv(np.identity(3)), self.ortho1,
rtol=1e-04, atol=1e-06)
with self.assertRaises(ValueError):
ilr_inv(self.cdata1, basis=self.cdata1)
# make sure that inplace modification is not occurring
ilr_inv(self.cdata1)
npt.assert_allclose(self.cdata1,
np.array([[2, 2, 6],
[4, 4, 2]]))
def test_ilr_basis_isomorphism(self):
# tests to make sure that the isomorphism holds
# with the introduction of the basis.
basis = np.array([[0.80442968, 0.19557032]])
table = np.array([[np.log(1/10)*np.sqrt(1/2),
np.log(1.14141414 / 9.90909091)*np.sqrt(1/2),
np.log(1.28282828 / 9.81818182)*np.sqrt(1/2),
np.log(1.42424242 / 9.72727273)*np.sqrt(1/2),
np.log(1.56565657 / 9.63636364)*np.sqrt(1/2)]]).T
res = ilr(ilr_inv(table, basis=basis), basis=basis)
npt.assert_allclose(res, table.squeeze())
table = np.array([[1., 10.],
[1.14141414, 9.90909091],
[1.28282828, 9.81818182],
[1.42424242, 9.72727273],
[1.56565657, 9.63636364]])
res = ilr_inv(np.atleast_2d(ilr(table, basis=basis)).T, basis=basis)
npt.assert_allclose(res, closure(table.squeeze()))
def test_ilr_inv_basis(self):
exp = closure(np.array([[1., 10.],
[1.14141414, 9.90909091],
[1.28282828, 9.81818182],
[1.42424242, 9.72727273],
[1.56565657, 9.63636364]]))
basis = np.array([[0.80442968, 0.19557032]])
table = np.array([[np.log(1/10)*np.sqrt(1/2),
np.log(1.14141414 / 9.90909091)*np.sqrt(1/2),
np.log(1.28282828 / 9.81818182)*np.sqrt(1/2),
np.log(1.42424242 / 9.72727273)*np.sqrt(1/2),
np.log(1.56565657 / 9.63636364)*np.sqrt(1/2)]]).T
res = ilr_inv(table, basis=basis)
npt.assert_allclose(res, exp)
def test_ilr_inv_basis_one_dimension_error(self):
basis = clr(np.array([[0.80442968, 0.19557032]]))
table = np.array([[np.log(1/10)*np.sqrt(1/2),
np.log(1.14141414 / 9.90909091)*np.sqrt(1/2),
np.log(1.28282828 / 9.81818182)*np.sqrt(1/2),
np.log(1.42424242 / 9.72727273)*np.sqrt(1/2),
np.log(1.56565657 / 9.63636364)*np.sqrt(1/2)]]).T
with self.assertRaises(ValueError):
ilr_inv(table, basis=basis)
class AncomTests(TestCase):
def setUp(self):
# Basic count data with 2 groupings
self.table1 = pd.DataFrame([
[10, 10, 10, 20, 20, 20],
[11, 12, 11, 21, 21, 21],
[10, 11, 10, 10, 11, 10],
[10, 11, 10, 10, 10, 9],
[10, 11, 10, 10, 10, 10],
[10, 11, 10, 10, 10, 11],
[10, 13, 10, 10, 10, 12]]).T
self.cats1 = pd.Series([0, 0, 0, 1, 1, 1])
# Real valued data with 2 groupings
D, L = 40, 80
np.random.seed(0)
self.table2 = np.vstack((np.concatenate((normal(10, 1, D),
normal(200, 1, D))),
np.concatenate((normal(20, 1, D),
normal(100000, 1, D))),
normal(10, 1, L),
normal(10, 1, L),
np.concatenate((normal(20, 1, D),
normal(100000, 1, D))),
normal(10, 1, L),
normal(10, 1, L),
normal(10, 1, L),
normal(10, 1, L)))
self.table2 = np.absolute(self.table2)
self.table2 = pd.DataFrame(self.table2.astype(np.int).T)
self.cats2 = pd.Series([0]*D + [1]*D)
# Real valued data with 2 groupings and no significant difference
self.table3 = pd.DataFrame([
[10, 10.5, 10, 10, 10.5, 10.3],
[11, 11.5, 11, 11, 11.5, 11.3],
[10, 10.5, 10, 10, 10.5, 10.2],
[10, 10.5, 10, 10, 10.5, 10.3],
[10, 10.5, 10, 10, 10.5, 10.1],
[10, 10.5, 10, 10, 10.5, 10.6],
[10, 10.5, 10, 10, 10.5, 10.4]]).T
self.cats3 = pd.Series([0, 0, 0, 1, 1, 1])
# Real valued data with 3 groupings
D, L = 40, 120
np.random.seed(0)
self.table4 = np.vstack((np.concatenate((normal(10, 1, D),
normal(200, 1, D),
normal(400, 1, D))),
np.concatenate((normal(20, 1, D),
normal(100000, 1, D),
normal(2000, 1, D))),
normal(10, 1, L),
normal(10, 1, L),
np.concatenate((normal(20, 1, D),
normal(100000, 1, D),
normal(2000, 1, D))),
normal(10, 1, L),
normal(10, 1, L),
normal(10, 1, L),
normal(10, 1, L)))
self.table4 = np.absolute(self.table4)
self.table4 = pd.DataFrame(self.table4.astype(np.int).T)
self.cats4 = pd.Series([0]*D + [1]*D + [2]*D)
# Noncontiguous case
self.table5 = pd.DataFrame([
[11, 12, 21, 11, 21, 21],
[10, 11, 10, 10, 11, 10],
[10, 11, 10, 10, 10, 9],
[10, 11, 10, 10, 10, 10],
[10, 11, 10, 10, 10, 11],
[10, 10, 20, 9, 20, 20],
[10, 13, 10, 10, 10, 12]]).T
self.cats5 = pd.Series([0, 0, 1, 0, 1, 1])
# Different number of classes case
self.table6 = pd.DataFrame([
[11, 12, 9, 11, 21, 21],
[10, 11, 10, 10, 11, 10],
[10, 11, 10, 10, 10, 9],
[10, 11, 10, 10, 10, 10],
[10, 11, 10, 10, 10, 11],
[10, 10, 10, 9, 20, 20],
[10, 13, 10, 10, 10, 12]]).T
self.cats6 = pd.Series([0, 0, 0, 0, 1, 1])
# Categories are letters
self.table7 = pd.DataFrame([
[11, 12, 9, 11, 21, 21],
[10, 11, 10, 10, 11, 10],
[10, 11, 10, 10, 10, 9],
[10, 11, 10, 10, 10, 10],
[10, 11, 10, 10, 10, 11],
[10, 10, 10, 9, 20, 20],
[10, 13, 10, 10, 10, 12]]).T
self.cats7 = pd.Series(['a', 'a', 'a', 'a', 'b', 'b'])
# Swap samples
self.table8 = pd.DataFrame([
[10, 10, 10, 20, 20, 20],
[11, 12, 11, 21, 21, 21],
[10, 11, 10, 10, 11, 10],
[10, 11, 10, 10, 10, 9],
[10, 11, 10, 10, 10, 10],
[10, 11, 10, 10, 10, 11],
[10, 13, 10, 10, 10, 12]]).T
self.table8.index = ['a', 'b', 'c',
'd', 'e', 'f']
self.cats8 = pd.Series([0, 0, 1, 0, 1, 1],
index=['a', 'b', 'd',
'c', 'e', 'f'])
# Real valued data with 3 groupings
D, L = 40, 120
np.random.seed(0)
self.table9 = np.vstack((np.concatenate((normal(10, 1, D),
normal(200, 1, D),
normal(400, 1, D))),
np.concatenate((normal(200000, 1, D),
normal(10, 1, D),
normal(2000, 1, D))),
normal(10, 10, L),
normal(10, 10, L),
np.concatenate((normal(2000, 1, D),
normal(100000, 1, D),
normal(2000, 1, D))),
normal(10000, 1000, L),
normal(10, 10, L),
normal(10, 10, L),
normal(10, 10, L),
normal(10000, 1000, L),
normal(10, 10, L),
normal(10, 10, L),
normal(10, 10, L),
np.concatenate((normal(2000, 1, D),
normal(100000, 1, D),
normal(2000, 1, D))),
normal(10000, 1000, L),
normal(10, 10, L),
normal(10, 10, L),
normal(10, 10, L)))
self.table9 = np.absolute(self.table9)+1
self.table9 = pd.DataFrame(self.table9.astype(np.int).T)
self.cats9 = pd.Series([0]*D + [1]*D + [2]*D)
# Real valued data with 2 groupings
D, L = 40, 80
np.random.seed(0)
self.table10 = np.vstack((np.concatenate((normal(10, 1, D),
normal(200, 1, D))),
np.concatenate((normal(10, 1, D),
normal(200, 1, D))),
np.concatenate((normal(20, 10, D),
normal(100, 10, D))),
normal(10, 1, L),
np.concatenate((normal(200, 100, D),
normal(100000, 100, D))),
np.concatenate((normal(200000, 100, D),
normal(300, 100, D))),
np.concatenate((normal(200000, 100, D),
normal(300, 100, D))),
np.concatenate((normal(20, 20, D),
normal(40, 10, D))),
np.concatenate((normal(20, 20, D),
normal(40, 10, D))),
np.concatenate((normal(20, 20, D),
normal(40, 10, D))),
normal(100, 10, L),
normal(100, 10, L),
normal(1000, 10, L),
normal(1000, 10, L),
normal(10, 10, L),
normal(10, 10, L),
normal(10, 10, L),
normal(10, 10, L)))
self.table10 = np.absolute(self.table10) + 1
self.table10 = pd.DataFrame(self.table10.astype(np.int).T)
self.cats10 = pd.Series([0]*D + [1]*D)
# zero count
self.bad1 = pd.DataFrame(np.array([
[10, 10, 10, 20, 20, 0],
[11, 11, 11, 21, 21, 21],
[10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10]]).T)
# negative count
self.bad2 = pd.DataFrame(np.array([
[10, 10, 10, 20, 20, 1],
[11, 11, 11, 21, 21, 21],
[10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, -1],
[10, 10, 10, 10, 10, 10]]).T)
# missing count
self.bad3 = pd.DataFrame(np.array([
[10, 10, 10, 20, 20, 1],
[11, 11, 11, 21, 21, 21],
[10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, np.nan],
[10, 10, 10, 10, 10, 10]]).T)
self.badcats1 = pd.Series([0, 0, 0, 1, np.nan, 1])
self.badcats2 = pd.Series([0, 0, 0, 0, 0, 0])
self.badcats3 = pd.Series([0, 0, 1, 1])
self.badcats4 = pd.Series(range(len(self.table1)))
self.badcats5 = pd.Series([1]*len(self.table1))
def test_ancom_basic_counts(self):
test_table = pd.DataFrame(self.table1)
original_table = copy.deepcopy(test_table)
test_cats = pd.Series(self.cats1)
original_cats = copy.deepcopy(test_cats)
result = ancom(test_table,
test_cats,
multiple_comparisons_correction=None)
# Test to make sure that the input table hasn't been altered
assert_data_frame_almost_equal(original_table, test_table)
# Test to make sure that the grouping vector hasn't been altered
pdt.assert_series_equal(original_cats, test_cats)
exp = pd.DataFrame(
{'W': np.array([5, 5, 2, 2, 2, 2, 2]),
'Reject null hypothesis': np.array([True, True, False, False,
False, False, False],
dtype=bool)})
assert_data_frame_almost_equal(result[0], exp)
def test_ancom_percentiles(self):
table = pd.DataFrame([[12, 11],
[9, 11],
[1, 11],
[22, 100],
[20, 53],
[23, 1]],
index=['s1', 's2', 's3', 's4', 's5', 's6'],
columns=['b1', 'b2'])
grouping = pd.Series(['a', 'a', 'a', 'b', 'b', 'b'],
index=['s1', 's2', 's3', 's4', 's5', 's6'])
percentiles = [0.0, 25.0, 50.0, 75.0, 100.0]
groups = ['a', 'b']
tuples = [(p, g) for g in groups for p in percentiles]
exp_mi = pd.MultiIndex.from_tuples(tuples,
names=['Percentile', 'Group'])
exp_data = np.array(
[[1.0, 11.0], [5.0, 11.0], [9.0, 11.0], [10.5, 11.0], [12.0, 11.0],
[20.0, 1.0], [21.0, 27.0], [22.0, 53.0], [22.5, 76.5],
[23.0, 100.0]])
exp = pd.DataFrame(exp_data.T, columns=exp_mi, index=['b1', 'b2'])
result = ancom(table, grouping)[1]
assert_data_frame_almost_equal(result, exp)
def test_ancom_percentiles_alt_categories(self):
table = pd.DataFrame([[12],
[9],
[1],
[22],
[20],
[23]],
index=['s1', 's2', 's3', 's4', 's5', 's6'],
columns=['b1'])
grouping = pd.Series(['a', 'a', 'c', 'b', 'b', 'c'],
index=['s1', 's2', 's3', 's4', 's5', 's6'])
percentiles = [0.0, 25.0, 50.0, 75.0, 100.0]
groups = ['a', 'b', 'c']
tuples = [(p, g) for g in groups for p in percentiles]
exp_mi = pd.MultiIndex.from_tuples(tuples,
names=['Percentile', 'Group'])
exp_data = np.array([[9.0], [9.75], [10.5], [11.25], [12.0], # a
[20.0], [20.5], [21.0], [21.5], [22.0], # b
[1.0], [6.5], [12.0], [17.5], [23.0]]) # c
exp = pd.DataFrame(exp_data.T, columns=exp_mi, index=['b1'])
result = ancom(table, grouping, percentiles=percentiles)[1]
assert_data_frame_almost_equal(result, exp)
def test_ancom_alt_percentiles(self):
table = pd.DataFrame([[12],
[9],
[1],
[22],
[20],
[23]],
index=['s1', 's2', 's3', 's4', 's5', 's6'],
columns=['b1'])
grouping = pd.Series(['a', 'a', 'a', 'b', 'b', 'b'],
index=['s1', 's2', 's3', 's4', 's5', 's6'])
percentiles = [42.0, 50.0]
groups = ['a', 'b']
tuples = [(p, g) for g in groups for p in percentiles]
exp_mi = pd.MultiIndex.from_tuples(tuples,
names=['Percentile', 'Group'])
exp_data = np.array([[7.71999999], [9.0], # a
[21.68], [22.0]]) # b
exp = pd.DataFrame(exp_data.T, columns=exp_mi, index=['b1'])
result = ancom(table, grouping, percentiles=percentiles)[1]
assert_data_frame_almost_equal(result, exp)
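# Worked percentile arithmetic behind exp_data (numpy's linear
# interpolation): for group 'a' = [1, 9, 12] the 42nd percentile sits at
# fractional rank 0.42 * (3 - 1) = 0.84, giving 1 + 0.84 * (9 - 1) = 7.72;
# for group 'b' = [20, 22, 23], 20 + 0.84 * (22 - 20) = 21.68.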
def test_ancom_percentiles_swapped(self):
table = pd.DataFrame([[12],
[9],
[1],
[22],
[20],
[23]],
index=['s1', 's2', 's3', 's4', 's5', 's6'],
columns=['b1'])
grouping = pd.Series(['a', 'a', 'b', 'a', 'b', 'b'],
index=['s1', 's2', 's4', 's3', 's5', 's6'])
percentiles = [42.0, 50.0]
groups = ['a', 'b']
tuples = [(p, g) for g in groups for p in percentiles]
exp_mi = pd.MultiIndex.from_tuples(tuples,
names=['Percentile', 'Group'])
exp_data = np.array([[7.71999999], [9.0], # a
[21.68], [22.0]]) # b
exp = pd.DataFrame(exp_data.T, columns=exp_mi, index=['b1'])
result = ancom(table, grouping, percentiles=percentiles)[1]
assert_data_frame_almost_equal(result, exp)
def test_ancom_percentile_order_unimportant(self):
table = pd.DataFrame([[12],
[9],
[1],
[22],
[20],
[23]],
index=['s1', 's2', 's3', 's4', 's5', 's6'],
columns=['b1'])
grouping = pd.Series(['a', 'a', 'a', 'b', 'b', 'b'],
index=['s1', 's2', 's3', 's4', 's5', 's6'])
# order of percentiles is unimportant after sorting
result1 = ancom(table, grouping, percentiles=[50.0, 42.0])[1]
result2 = ancom(table, grouping, percentiles=[42.0, 50.0])[1]
assert_data_frame_almost_equal(
result1.sort_index(axis=1), result2.sort_index(axis=1))
def test_ancom_percentiles_iterator(self):
table = pd.DataFrame([[12],
[9],
[1],
[22],
[20],
[23]],
index=['s1', 's2', 's3', 's4', 's5', 's6'],
columns=['b1'])
grouping = pd.Series(['a', 'a', 'a', 'b', 'b', 'b'],
index=['s1', 's2', 's3', 's4', 's5', 's6'])
percentiles = [42.0, 50.0]
groups = ['a', 'b']
tuples = [(p, g) for g in groups for p in percentiles]
exp_mi = pd.MultiIndex.from_tuples(tuples,
names=['Percentile', 'Group'])
exp_data = np.array([[7.71999999], [9.0], # a
[21.68], [22.0]]) # b
exp = pd.DataFrame(exp_data.T, columns=exp_mi, index=['b1'])
result = ancom(table, grouping, percentiles=iter(percentiles))[1]
assert_data_frame_almost_equal(result, exp)
def test_ancom_no_percentiles(self):
table = pd.DataFrame([[12],
[9],
[1],
[22],
[20],
[23]],
index=['s1', 's2', 's3', 's4', 's5', 's6'],
columns=['b1'])
grouping = pd.Series(['a', 'a', 'a', 'b', 'b', 'b'],
index=['s1', 's2', 's3', 's4', 's5', 's6'])
result = ancom(table, grouping, percentiles=[])[1]
assert_data_frame_almost_equal(result, pd.DataFrame())
def test_ancom_percentile_out_of_range(self):
table = pd.DataFrame([[12],
[9],
[1],
[22],
[20],
[23]],
index=['s1', 's2', 's3', 's4', 's5', 's6'],
columns=['b1'])
grouping = pd.Series(['a', 'a', 'a', 'b', 'b', 'b'],
index=['s1', 's2', 's3', 's4', 's5', 's6'])
with self.assertRaises(ValueError):
ancom(table, grouping, percentiles=[-1.0])
with self.assertRaises(ValueError):
ancom(table, grouping, percentiles=[100.1])
with self.assertRaises(ValueError):
ancom(table, grouping, percentiles=[10.0, 3.0, 101.0, 100])
def test_ancom_duplicate_percentiles(self):
table = pd.DataFrame([[12],
[9],
[1],
[22],
[20],
[23]],
index=['s1', 's2', 's3', 's4', 's5', 's6'],
columns=['b1'])
grouping = pd.Series(['a', 'a', 'a', 'b', 'b', 'b'],
index=['s1', 's2', 's3', 's4', 's5', 's6'])
with self.assertRaises(ValueError):
ancom(table, grouping, percentiles=[10.0, 10.0])
def test_ancom_basic_proportions(self):
# Converts from counts to proportions
test_table = pd.DataFrame(closure(self.table1))
original_table = copy.deepcopy(test_table)
test_cats = pd.Series(self.cats1)
original_cats = copy.deepcopy(test_cats)
result = ancom(test_table,
test_cats,
multiple_comparisons_correction=None)
# Test to make sure that the input table hasn't been altered
assert_data_frame_almost_equal(original_table, test_table)
# Test to make sure that the grouping vector hasn't been altered
pdt.assert_series_equal(original_cats, test_cats)
exp = pd.DataFrame(
{'W': np.array([5, 5, 2, 2, 2, 2, 2]),
'Reject null hypothesis': np.array([True, True, False, False,
False, False, False],
dtype=bool)})
assert_data_frame_almost_equal(result[0], exp)
def test_ancom_multiple_groups(self):
test_table = pd.DataFrame(self.table4)
original_table = copy.deepcopy(test_table)
test_cats = pd.Series(self.cats4)
original_cats = copy.deepcopy(test_cats)
result = ancom(test_table, test_cats)
# Test to make sure that the input table hasn't been altered
assert_data_frame_almost_equal(original_table, test_table)
# Test to make sure that the grouping vector hasn't been altered
pdt.assert_series_equal(original_cats, test_cats)
exp = pd.DataFrame(
{'W': np.array([8, 7, 3, 3, 7, 3, 3, 3, 3]),
'Reject null hypothesis': np.array([True, True, False, False,
True, False, False, False,
False], dtype=bool)})
assert_data_frame_almost_equal(result[0], exp)
def test_ancom_noncontiguous(self):
result = ancom(self.table5,
self.cats5,
multiple_comparisons_correction=None)
exp = pd.DataFrame(
{'W': np.array([6, 2, 2, 2, 2, 6, 2]),
'Reject null hypothesis': np.array([True, False, False, False,
False, True, False],
dtype=bool)})
assert_data_frame_almost_equal(result[0], exp)
def test_ancom_unbalanced(self):
result = ancom(self.table6,
self.cats6,
multiple_comparisons_correction=None)
exp = pd.DataFrame(
{'W': np.array([5, 3, 3, 2, 2, 5, 2]),
'Reject null hypothesis': np.array([True, False, False, False,
False, True, False],
dtype=bool)})
assert_data_frame_almost_equal(result[0], exp)
def test_ancom_letter_categories(self):
result = ancom(self.table7,
self.cats7,
multiple_comparisons_correction=None)
exp = pd.DataFrame(
{'W': np.array([5, 3, 3, 2, 2, 5, 2]),
'Reject null hypothesis': np.array([True, False, False, False,
False, True, False],
dtype=bool)})
assert_data_frame_almost_equal(result[0], exp)
def test_ancom_multiple_comparisons(self):
result = ancom(self.table1,
self.cats1,
multiple_comparisons_correction='holm-bonferroni',
significance_test=scipy.stats.mannwhitneyu)
exp = pd.DataFrame(
{'W': np.array([0]*7),
'Reject null hypothesis': np.array([False]*7, dtype=bool)})
assert_data_frame_almost_equal(result[0], exp)
def test_ancom_alternative_test(self):
result = ancom(self.table1,
self.cats1,
multiple_comparisons_correction=None,
significance_test=scipy.stats.ttest_ind)
exp = pd.DataFrame(
{'W': np.array([5, 5, 2, 2, 2, 2, 2]),
'Reject null hypothesis': np.array([True, True, False, False,
False, False, False],
dtype=bool)})
assert_data_frame_almost_equal(result[0], exp)
def test_ancom_normal_data(self):
result = ancom(self.table2,
self.cats2,
multiple_comparisons_correction=None,
significance_test=scipy.stats.ttest_ind)
exp = pd.DataFrame(
{'W': np.array([8, 8, 3, 3, 8, 3, 3, 3, 3]),
'Reject null hypothesis': np.array([True, True, False, False,
True, False, False,
False, False],
dtype=bool)})
assert_data_frame_almost_equal(result[0], exp)
def test_ancom_basic_counts_swapped(self):
result = ancom(self.table8, self.cats8)
exp = pd.DataFrame(
{'W': np.array([5, 5, 2, 2, 2, 2, 2]),
'Reject null hypothesis': np.array([True, True, False, False,
False, False, False],
dtype=bool)})
assert_data_frame_almost_equal(result[0], exp)
def test_ancom_no_signal(self):
result = ancom(self.table3,
self.cats3,
multiple_comparisons_correction=None)
exp = pd.DataFrame(
{'W': np.array([0]*7),
'Reject null hypothesis': np.array([False]*7, dtype=bool)})
assert_data_frame_almost_equal(result[0], exp)
def test_ancom_tau(self):
exp1 = pd.DataFrame(
{'W': np.array([8, 7, 3, 3, 7, 3, 3, 3, 3]),
'Reject null hypothesis': np.array([True, False, False, False,
False, False, False, False,
False], dtype=bool)})
exp2 = pd.DataFrame(
{'W': np.array([17, 17, 5, 6, 16, 5, 7, 5,
4, 5, 8, 4, 5, 16, 5, 11, 4, 6]),
'Reject null hypothesis': np.array([True, True, False, False,
True, False, False, False,
False, False, False, False,
False, True, False, False,
False, False], dtype=bool)})
exp3 = pd.DataFrame(
{'W': np.array([16, 16, 17, 10, 17, 16, 16,
15, 15, 15, 13, 10, 10, 10,
9, 9, 9, 9]),
'Reject null hypothesis': np.array([True, True, True, False,
True, True, True, True,
True, True, True, False,
False, False, False, False,
False, False], dtype=bool)})
result1 = ancom(self.table4, self.cats4,
multiple_comparisons_correction=None, tau=0.25)
result2 = ancom(self.table9, self.cats9,
multiple_comparisons_correction=None, tau=0.02)
result3 = ancom(self.table10, self.cats10,
multiple_comparisons_correction=None, tau=0.02)
assert_data_frame_almost_equal(result1[0], exp1)
assert_data_frame_almost_equal(result2[0], exp2)
assert_data_frame_almost_equal(result3[0], exp3)
def test_ancom_theta(self):
result = ancom(self.table1, self.cats1, theta=0.3)
exp = pd.DataFrame(
{'W': np.array([5, 5, 2, 2, 2, 2, 2]),
'Reject null hypothesis': np.array([True, True, False, False,
False, False, False],
dtype=bool)})
assert_data_frame_almost_equal(result[0], exp)
def test_ancom_alpha(self):
result = ancom(self.table1, self.cats1,
multiple_comparisons_correction=None, alpha=0.5)
exp = pd.DataFrame(
{'W': np.array([6, 6, 4, 5, 5, 4, 2]),
'Reject null hypothesis': np.array([True, True, False, True,
True, False, False],
dtype=bool)})
assert_data_frame_almost_equal(result[0], exp)
def test_ancom_fail_type(self):
with self.assertRaises(TypeError):
ancom(self.table1.values, self.cats1)
with self.assertRaises(TypeError):
ancom(self.table1, self.cats1.values)
def test_ancom_fail_zeros(self):
with self.assertRaises(ValueError):
ancom(self.bad1, self.cats2, multiple_comparisons_correction=None)
def test_ancom_fail_negative(self):
with self.assertRaises(ValueError):
ancom(self.bad2, self.cats2, multiple_comparisons_correction=None)
def test_ancom_fail_not_implemented_multiple_comparisons_correction(self):
with self.assertRaises(ValueError):
ancom(self.table2, self.cats2,
multiple_comparisons_correction='fdr')
def test_ancom_fail_missing(self):
with self.assertRaises(ValueError):
ancom(self.bad3, self.cats1)
with self.assertRaises(ValueError):
ancom(self.table1, self.badcats1)
def test_ancom_fail_groups(self):
with self.assertRaises(ValueError):
ancom(self.table1, self.badcats2)
def test_ancom_fail_size_mismatch(self):
with self.assertRaises(ValueError):
ancom(self.table1, self.badcats3)
def test_ancom_fail_group_unique(self):
with self.assertRaises(ValueError):
ancom(self.table1, self.badcats4)
def test_ancom_fail_1_group(self):
with self.assertRaises(ValueError):
ancom(self.table1, self.badcats5)
def test_ancom_fail_tau(self):
with self.assertRaises(ValueError):
ancom(self.table1, self.cats1, tau=-1)
with self.assertRaises(ValueError):
ancom(self.table1, self.cats1, tau=1.1)
def test_ancom_fail_theta(self):
with self.assertRaises(ValueError):
ancom(self.table1, self.cats1, theta=-1)
with self.assertRaises(ValueError):
ancom(self.table1, self.cats1, theta=1.1)
def test_ancom_fail_alpha(self):
with self.assertRaises(ValueError):
ancom(self.table1, self.cats1, alpha=-1)
with self.assertRaises(ValueError):
ancom(self.table1, self.cats1, alpha=1.1)
def test_ancom_fail_multiple_groups(self):
with self.assertRaises(TypeError):
ancom(self.table4, self.cats4,
significance_test=scipy.stats.ttest_ind)
def test_holm_bonferroni(self):
p = [0.005, 0.011, 0.02, 0.04, 0.13]
corrected_p = p * np.arange(1, 6)[::-1]
guessed_p = _holm_bonferroni(p)
for a, b in zip(corrected_p, guessed_p):
self.assertAlmostEqual(a, b)
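# Worked correction for the ascending p-values above: Holm-Bonferroni
# multiplies the k-th smallest p-value by (m - k + 1), here m = 5, i.e.
# 0.005*5 = 0.025, 0.011*4 = 0.044, 0.02*3 = 0.06, 0.04*2 = 0.08,
# 0.13*1 = 0.13. No running maximum is needed in this case because the
# products are already monotone, so p * np.arange(1, 6)[::-1] suffices.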
if __name__ == "__main__":
main()
|
kdmurray91/scikit-bio
|
skbio/stats/tests/test_composition.py
|
Python
|
bsd-3-clause
| 47,376
|
[
"scikit-bio"
] |
3f22d9eddecf40e9ce616f2f3697318c8003062c2e643901deec414299d8331d
|
import random
import math
import time
import re
import zlib
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import *
from toontown.toonbase.ToonPythonUtil import *
from direct.gui.DirectGui import *
from direct.task import Task
from direct.showbase import PythonUtil
from direct.directnotify import DirectNotifyGlobal
from direct.gui import DirectGuiGlobals
from panda3d.core import *
from panda3d.direct import *
from otp.avatar import LocalAvatar
from otp.login import LeaveToPayDialog
from otp.avatar import PositionExaminer
from otp.otpbase import OTPGlobals
from otp.avatar import DistributedPlayer
from otp.nametag.NametagConstants import *
from otp.margins.WhisperPopup import *
from toontown.shtiker import ShtikerBook
from toontown.shtiker import InventoryPageOLD
from toontown.shtiker import InventoryPageNEW
from toontown.shtiker import MapPage
from toontown.shtiker import OptionsPage
from toontown.shtiker import ShardPage
from toontown.shtiker import QuestPage
from toontown.shtiker import TrackPage
from toontown.shtiker import KartPage
from toontown.shtiker import GardenPage
from toontown.shtiker import GolfPage
from toontown.shtiker import SuitPage
from toontown.shtiker import DisguisePage
from toontown.shtiker import PhotoAlbumPage
from toontown.shtiker import FishPage
from toontown.shtiker import NPCFriendPage
from toontown.shtiker import EventsPage
from toontown.shtiker import TIPPage
from toontown.quest import Quests
from toontown.quest import QuestParser
from toontown.toonbase.ToontownGlobals import *
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
#from toontown.catalog import CatalogNotifyDialog
from toontown.chat import ToontownChatManager
from toontown.chat import TTTalkAssistant
from toontown.estate import GardenGlobals
from toontown.battle.BattleSounds import *
from toontown.battle import Fanfare
from toontown.parties import PartyGlobals
from toontown.toon import ElevatorNotifier
from toontown.toon import ToonDNA
import DistributedToon
import Toon
import LaffMeter
from toontown.quest import QuestMap
from toontown.toon.DistributedNPCToonBase import DistributedNPCToonBase
WantNewsPage = config.GetBool('want-news-page', ToontownGlobals.DefaultWantNewsPageSetting)
from toontown.toontowngui import NewsPageButtonManager
if WantNewsPage:
from toontown.shtiker import NewsPage
AdjustmentForNewsButton = -0.275
ClaraBaseXPos = 0.12
if (__debug__):
import pdb
class LocalToon(DistributedToon.DistributedToon, LocalAvatar.LocalAvatar):
neverDisable = 1
piePowerSpeed = config.GetDouble('pie-power-speed', 0.2)
piePowerExponent = config.GetDouble('pie-power-exponent', 0.75)
def __init__(self, cr):
try:
self.LocalToon_initialized
except AttributeError:
self.LocalToon_initialized = 1
self.numFlowers = 0
self.maxFlowerBasket = 0
DistributedToon.DistributedToon.__init__(self, cr)
chatMgr = ToontownChatManager.ToontownChatManager(cr, self)
talkAssistant = TTTalkAssistant.TTTalkAssistant()
LocalAvatar.LocalAvatar.__init__(self, cr, chatMgr, talkAssistant, passMessagesThrough=True)
self.soundRun = base.loadSfx('phase_3.5/audio/sfx/AV_footstep_runloop.ogg')
self.soundWalk = base.loadSfx('phase_3.5/audio/sfx/AV_footstep_walkloop.ogg')
self.soundWhisper = base.loadSfx('phase_3.5/audio/sfx/GUI_whisper_3.ogg')
self.soundPhoneRing = base.loadSfx('phase_3.5/audio/sfx/telephone_ring.ogg')
self.soundSystemMessage = base.loadSfx('phase_3/audio/sfx/clock03.ogg')
self.positionExaminer = PositionExaminer.PositionExaminer()
friendsGui = loader.loadModel('phase_3.5/models/gui/friendslist_gui')
friendsButtonNormal = friendsGui.find('**/FriendsBox_Closed')
friendsButtonPressed = friendsGui.find('**/FriendsBox_Rollover')
friendsButtonRollover = friendsGui.find('**/FriendsBox_Rollover')
newScale = oldScale = 0.8
if WantNewsPage:
newScale = oldScale * ToontownGlobals.NewsPageScaleAdjust
self.bFriendsList = DirectButton(image=(friendsButtonNormal, friendsButtonPressed, friendsButtonRollover), relief=None, pos=(-0.141, 0, -0.125), parent=base.a2dTopRight, scale=newScale, text=('', TTLocalizer.FriendsListLabel, TTLocalizer.FriendsListLabel), text_scale=0.09, text_fg=Vec4(1, 1, 1, 1), text_shadow=Vec4(0, 0, 0, 1), text_pos=(0, -0.18), text_font=ToontownGlobals.getInterfaceFont(), command=self.sendFriendsListEvent)
self.bFriendsList.hide()
self.friendsListButtonActive = 0
self.friendsListButtonObscured = 0
self.moveFurnitureButtonObscured = 0
self.clarabelleButtonObscured = 0
friendsGui.removeNode()
self.__furnitureGui = None
self.__lerpFurnitureButton = None
self.__clarabelleButton = None
self.__clarabelleFlash = None
self.furnitureManager = None
self.furnitureDirector = None
self.gotCatalogNotify = 0
self.__catalogNotifyDialog = None
self.accept('phaseComplete-5.5', self.loadPhase55Stuff)
Toon.loadDialog()
self.isIt = 0
self.cantLeaveGame = 0
self.tunnelX = 0.0
self.estate = None
self.__pieBubble = None
self.allowPies = 0
self.__pieButton = None
self.__piePowerMeter = None
self.__piePowerMeterSequence = None
self.__pieButtonType = None
self.__pieButtonCount = None
self.tossPieStart = None
self.__presentingPie = 0
self.__pieSequence = 0
self.wantBattles = config.GetBool('want-battles', 1)
self.seeGhosts = config.GetBool('see-ghosts', 0)
wantNameTagAvIds = config.GetBool('want-nametag-avids', 0)
if wantNameTagAvIds:
messenger.send('nameTagShowAvId', [])
base.idTags = 1
self.glitchX = 0
self.glitchY = 0
self.glitchZ = 0
self.glitchCount = 0
self.ticker = 0
self.glitchOkay = 1
self.tempGreySpacing = 0
self.wantStatePrint = config.GetBool('want-statePrint', 0)
self.__gardeningGui = None
self.__gardeningGuiFake = None
self.__shovelButton = None
self.shovelRelatedDoId = 0
self.shovelAbility = ''
self.plantToWater = 0
self.shovelButtonActiveCount = 0
self.wateringCanButtonActiveCount = 0
self.showingWateringCan = 0
self.showingShovel = 0
self.touchingPlantList = []
self.inGardenAction = None
self.guiConflict = 0
self.lastElevatorLeft = 0
self.elevatorNotifier = ElevatorNotifier.ElevatorNotifier()
self.accept(OTPGlobals.AvatarFriendAddEvent, self.sbFriendAdd)
self.accept(OTPGlobals.AvatarFriendUpdateEvent, self.sbFriendUpdate)
self.accept(OTPGlobals.AvatarFriendRemoveEvent, self.sbFriendRemove)
self._zoneId = None
self.accept('system message aknowledge', self.systemWarning)
self.systemMsgAckGuiDoneEvent = 'systemMsgAckGuiDoneEvent'
self.accept(self.systemMsgAckGuiDoneEvent, self.hideSystemMsgAckGui)
self.systemMsgAckGui = None
self.createSystemMsgAckGui()
if not hasattr(base.cr, 'lastLoggedIn'):
base.cr.lastLoggedIn = self.cr.toontownTimeManager.convertStrToToontownTime('')
self.setLastTimeReadNews(base.cr.lastLoggedIn)
self.acceptingNewFriends = True
self.acceptingNonFriendWhispers = True
self.physControls.event.addAgainPattern('again%in')
self.oldPos = None
self.questMap = None
self.prevToonIdx = 0
def setDNA(self, dna):
base.localAvatarStyle = dna
DistributedToon.DistributedToon.setDNA(self, dna)
def setName(self, name):
base.localAvatarName = name
DistributedToon.DistributedToon.setName(self, name)
def wantLegacyLifter(self):
return True
def startGlitchKiller(self):
if localAvatar.getZoneId() not in GlitchKillerZones:
return
if __dev__:
self.glitchMessage = 'START GLITCH KILLER'
randChoice = random.randint(0, 3)
if randChoice == 0:
self.glitchMessage = 'START GLITCH KILLER'
elif randChoice == 1:
self.glitchMessage = 'GLITCH KILLER ENGAGED'
elif randChoice == 2:
self.glitchMessage = 'GLITCH KILLER GO!'
elif randChoice == 3:
self.glitchMessage = 'GLITCH IN YO FACE FOOL!'
self.notify.debug(self.glitchMessage)
taskMgr.remove(self.uniqueName('glitchKiller'))
taskMgr.add(self.glitchKiller, self.uniqueName('glitchKiller'))
self.glitchOkay = 1
def pauseGlitchKiller(self):
self.tempGreySpacing = 1
def unpauseGlitchKiller(self):
self.tempGreySpacing = 0
def stopGlitchKiller(self):
if __dev__ and hasattr(self, 'glitchMessage'):
if self.glitchMessage == 'START GLITCH KILLER':
self.notify.debug('STOP GLITCH KILLER')
elif self.glitchMessage == 'GLITCH KILLER ENGAGED':
self.notify.debug('GLITCH KILLER DISENGAGED')
elif self.glitchMessage == 'GLITCH KILLER GO!':
self.notify.debug('GLITCH KILLER NO GO!')
elif self.glitchMessage == 'GLITCH IN YO FACE FOOL!':
self.notify.debug('GLITCH OFF YO FACE FOOL!')
taskMgr.remove(self.uniqueName('glitchKiller'))
self.glitchOkay = 1
def glitchKiller(self, taskFooler = 0):
if base.greySpacing or self.tempGreySpacing:
return Task.cont
self.ticker += 1
if not self.physControls.lifter.hasContact() and not self.glitchOkay:
self.glitchCount += 1
else:
self.glitchX = self.getX()
self.glitchY = self.getY()
self.glitchZ = self.getZ()
self.glitchCount = 0
if self.physControls.lifter.hasContact():
self.glitchOkay = 0
if hasattr(self, 'physControls'):
if self.ticker >= 10:
self.ticker = 0
if self.glitchCount >= 7:
self.notify.warning('GLITCH MAXED!!! resetting pos')
self.setX(self.glitchX - 1 * (self.getX() - self.glitchX))
self.setY(self.glitchY - 1 * (self.getY() - self.glitchY))
self.glitchCount = 0
return Task.cont
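# Reading of the heuristic above (not documented upstream): the task runs
# every frame, remembers the last position at which the physics lifter had
# floor contact, and every 10th tick checks whether at least 7 consecutive
# contact-free samples have accumulated; if so it mirrors the toon back
# through the last grounded position (2 * glitchX - getX()) to recover from
# a fall-through-the-floor glitch.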
def announceGenerate(self):
self.startLookAround()
DistributedToon.DistributedToon.announceGenerate(self)
acceptingNewFriends = settings.get('acceptingNewFriends', {})
acceptingNonFriendWhispers = settings.get('acceptingNonFriendWhispers', {})
acceptingTeleport = settings.get('acceptingTeleport', {})
if str(self.doId) not in acceptingNewFriends:
acceptingNewFriends[str(self.doId)] = True
settings['acceptingNewFriends'] = acceptingNewFriends
if str(self.doId) not in acceptingNonFriendWhispers:
acceptingNonFriendWhispers[str(self.doId)] = True
settings['acceptingNonFriendWhispers'] = acceptingNonFriendWhispers
if str(self.doId) not in acceptingTeleport:
acceptingTeleport[str(self.doId)] = True
settings['acceptingTeleport'] = acceptingTeleport
self.acceptingNewFriends = acceptingNewFriends[str(self.doId)]
self.acceptingNonFriendWhispers = acceptingNonFriendWhispers[str(self.doId)]
self.acceptingTeleport = acceptingTeleport[str(self.doId)]
def disable(self):
self.laffMeter.destroy()
del self.laffMeter
self.questMap.destroy()
self.questMap = None
if hasattr(self, 'purchaseButton'):
self.purchaseButton.destroy()
del self.purchaseButton
self.newsButtonMgr.request('Off')
self.book.unload()
del self.optionsPage
del self.shardPage
del self.mapPage
del self.invPage
del self.questPage
del self.suitPage
del self.sosPage
del self.disguisePage
del self.fishPage
del self.gardenPage
del self.trackPage
del self.book
if base.wantKarts:
if hasattr(self, 'kartPage'):
del self.kartPage
if base.wantNametags:
self.nametag.unmanage(base.marginManager)
self.ignoreAll()
DistributedToon.DistributedToon.disable(self)
taskMgr.remove(self.uniqueName('KeepAliveTimeout'))
return
def disableBodyCollisions(self):
pass
def delete(self):
try:
self.LocalToon_deleted
except AttributeError:
self.LocalToon_deleted = 1
Toon.unloadDialog()
QuestParser.clear()
DistributedToon.DistributedToon.delete(self)
LocalAvatar.LocalAvatar.delete(self)
self.bFriendsList.destroy()
del self.bFriendsList
if self.__pieButton:
self.__pieButton.destroy()
self.__pieButton = None
if self.__piePowerMeter:
self.__piePowerMeter.destroy()
self.__piePowerMeter = None
taskMgr.remove('unlockGardenButtons')
if self.__lerpFurnitureButton:
self.__lerpFurnitureButton.finish()
if self.__furnitureGui:
self.__furnitureGui.destroy()
del self.__furnitureGui
if self.__gardeningGui:
self.__gardeningGui.destroy()
del self.__gardeningGui
if self.__gardeningGuiFake:
self.__gardeningGuiFake.destroy()
del self.__gardeningGuiFake
if self.__clarabelleButton:
self.__clarabelleButton.destroy()
del self.__clarabelleButton
if self.__clarabelleFlash:
self.__clarabelleFlash.finish()
del self.__clarabelleFlash
if self.__catalogNotifyDialog:
self.__catalogNotifyDialog.cleanup()
del self.__catalogNotifyDialog
taskMgr.remove('KeepAliveTimeout')
return
def initInterface(self):
self.newsButtonMgr = NewsPageButtonManager.NewsPageButtonManager()
self.newsButtonMgr.request('Hidden')
self.book = ShtikerBook.ShtikerBook('bookDone')
self.book.load()
self.book.hideButton()
self.optionsPage = OptionsPage.OptionsPage()
self.optionsPage.load()
self.book.addPage(self.optionsPage, pageName=TTLocalizer.OptionsPageTitle)
self.shardPage = ShardPage.ShardPage()
self.shardPage.load()
self.book.addPage(self.shardPage, pageName=TTLocalizer.ShardPageTitle)
self.mapPage = MapPage.MapPage()
self.mapPage.load()
self.book.addPage(self.mapPage, pageName=TTLocalizer.MapPageTitle)
if settings['newGui'] == True:
self.invPage = InventoryPageNEW.InventoryPageNEW()
self.invPage.load()
else:
self.invPage = InventoryPageOLD.InventoryPageOLD()
self.invPage.load()
self.book.addPage(self.invPage, pageName=TTLocalizer.InventoryPageTitle)
self.questPage = QuestPage.QuestPage()
self.questPage.load()
self.book.addPage(self.questPage, pageName=TTLocalizer.QuestPageToonTasks)
self.trackPage = TrackPage.TrackPage()
self.trackPage.load()
self.book.addPage(self.trackPage, pageName=TTLocalizer.TrackPageShortTitle)
self.suitPage = SuitPage.SuitPage()
self.suitPage.load()
self.book.addPage(self.suitPage, pageName=TTLocalizer.SuitPageTitle)
self.fishPage = FishPage.FishPage()
self.fishPage.setAvatar(self)
self.fishPage.load()
self.book.addPage(self.fishPage, pageName=TTLocalizer.FishPageTitle)
if base.wantKarts:
self.addKartPage()
if self.disguisePageFlag:
self.loadDisguisePages()
if self.sosPageFlag:
self.loadSosPages()
if self.gardenStarted:
self.loadGardenPages()
self.addGolfPage()
self.photoPage = PhotoAlbumPage.PhotoAlbumPage()
self.photoPage.load()
self.book.addPage(self.photoPage, pageName=TTLocalizer.PhotoPageTitle)
self.addEventsPage()
if WantNewsPage:
self.addNewsPage()
self.book.setPage(self.mapPage, enterPage=False)
self.laffMeter = LaffMeter.LaffMeter(self.style, self.hp, self.maxHp)
self.laffMeter.setAvatar(self)
self.laffMeter.setScale(0.075)
self.laffMeter.reparentTo(base.a2dBottomLeft)
if self.style.getAnimal() == 'monkey':
self.laffMeter.setPos(0.153, 0.0, 0.13)
else:
self.laffMeter.setPos(0.133, 0.0, 0.13)
self.laffMeter.stop()
self.questMap = QuestMap.QuestMap(self)
self.questMap.stop()
if not base.cr.isPaid():
guiButton = loader.loadModel('phase_3/models/gui/quit_button')
self.purchaseButton = DirectButton(parent=aspect2d, relief=None, image=(guiButton.find('**/QuitBtn_UP'), guiButton.find('**/QuitBtn_DN'), guiButton.find('**/QuitBtn_RLVR')), image_scale=0.9, text=TTLocalizer.OptionsPagePurchase, text_scale=0.05, text_pos=(0, -0.01), textMayChange=0, pos=(0.885, 0, -0.94), sortOrder=100, command=self.__handlePurchase)
base.setCellsAvailable([base.bottomCells[4]], 0)
self.accept('time-insert', self.__beginTossPie)
self.accept('time-insert-up', self.__endTossPie)
self.accept('time-delete', self.__beginTossPie)
self.accept('time-delete-up', self.__endTossPie)
self.accept('pieHit', self.__pieHit)
self.accept('interrupt-pie', self.interruptPie)
self.accept('InputState-jump', self.__toonMoved)
self.accept('InputState-forward', self.__toonMoved)
self.accept('InputState-reverse', self.__toonMoved)
self.accept('InputState-turnLeft', self.__toonMoved)
self.accept('InputState-turnRight', self.__toonMoved)
self.accept('InputState-slide', self.__toonMoved)
QuestParser.init()
return
def __handlePurchase(self):
self.purchaseButton.hide()
if (base.cr.isWebPlayToken() or __dev__):
if base.cr.isPaid():
if base.cr.productName in ['DisneyOnline-UK', 'DisneyOnline-AP', 'JP', 'DE', 'BR', 'FR']:
paidNoParentPassword = launcher and launcher.getParentPasswordSet()
else:
paidNoParentPassword = launcher and not launcher.getParentPasswordSet()
else:
paidNoParentPassword = 0
self.leaveToPayDialog = LeaveToPayDialog.LeaveToPayDialog(paidNoParentPassword, self.purchaseButton.show)
self.leaveToPayDialog.show()
else:
self.notify.error('You should not get here without a PlayToken')
if base.wantKarts:
def addKartPage(self):
if self.hasKart():
if hasattr(self, 'kartPage') and self.kartPage != None:
return
if not launcher.getPhaseComplete(6):
self.acceptOnce('phaseComplete-6', self.addKartPage)
return
self.kartPage = KartPage.KartPage()
self.kartPage.setAvatar(self)
self.kartPage.load()
self.book.addPage(self.kartPage, pageName=TTLocalizer.KartPageTitle)
return
def setWantBattles(self, wantBattles):
self.wantBattles = wantBattles
def loadDisguisePages(self):
if self.disguisePage != None:
return
if not launcher.getPhaseComplete(9):
self.acceptOnce('phaseComplete-9', self.loadDisguisePages)
return
self.disguisePage = DisguisePage.DisguisePage()
self.disguisePage.load()
self.book.addPage(self.disguisePage, pageName=TTLocalizer.DisguisePageTitle)
self.loadSosPages()
return
def loadSosPages(self):
if self.sosPage != None:
return
self.sosPage = NPCFriendPage.NPCFriendPage()
self.sosPage.load()
self.book.addPage(self.sosPage, pageName=TTLocalizer.NPCFriendPageTitle)
return
def loadGardenPages(self):
if self.gardenPage != None:
return
if not launcher.getPhaseComplete(5.5):
self.acceptOnce('phaseComplete-5.5', self.loadPhase55Stuff)
return
self.gardenPage = GardenPage.GardenPage()
self.gardenPage.load()
self.book.addPage(self.gardenPage, pageName=TTLocalizer.GardenPageTitle)
return
def loadPhase55Stuff(self):
if self.gardenPage == None:
self.gardenPage = GardenPage.GardenPage()
self.gardenPage.load()
self.book.addPage(self.gardenPage, pageName=TTLocalizer.GardenPageTitle)
elif not launcher.getPhaseComplete(5.5):
self.acceptOnce('phaseComplete-5.5', self.loadPhase55Stuff)
self.refreshOnscreenButtons()
return
def setAsGM(self, state):
self.notify.debug('Setting GM State: %s in LocalToon' % state)
DistributedToon.DistributedToon.setAsGM(self, state)
if self.gmState:
if config.GetString('gm-nametag-string', '') != '':
self.gmNameTagString = config.GetString('gm-nametag-string')
if config.GetString('gm-nametag-color', '') != '':
self.gmNameTagColor = config.GetString('gm-nametag-color')
if config.GetInt('gm-nametag-enabled', 0):
self.gmNameTagEnabled = 1
self.d_updateGMNameTag()
def displayTalkWhisper(self, fromId, avatarName, rawString, mods):
sender = base.cr.identifyAvatar(fromId)
if sender:
chatString, scrubbed = sender.scrubTalk(rawString, mods)
else:
chatString, scrubbed = self.scrubTalk(rawString, mods)
sender = self
sfx = self.soundWhisper
chatString = avatarName + ': ' + chatString
whisper = WhisperPopup(chatString, OTPGlobals.getInterfaceFont(), WhisperPopup.WTNormal)
whisper.setClickable(avatarName, fromId)
whisper.manage(base.marginManager)
base.playSfx(sfx)
def displayTalkAccount(self, fromId, senderName, rawString, mods):
sfx = self.soundWhisper
playerInfo = base.cr.playerFriendsManager.playerId2Info.get(fromId, None)
if playerInfo == None:
return
senderAvId = base.cr.playerFriendsManager.findAvIdFromPlayerId(fromId)
if not senderName and base.cr.playerFriendsManager.playerId2Info.get(fromId):
senderName = base.cr.playerFriendsManager.playerId2Info.get(fromId).playerName
senderAvatar = base.cr.identifyAvatar(senderAvId)
if senderAvatar:
chatString, scrubbed = senderAvatar.scrubTalk(rawString, mods)
else:
chatString, scrubbed = self.scrubTalk(rawString, mods)
chatString = senderName + ': ' + chatString
whisper = WhisperPopup(chatString, OTPGlobals.getInterfaceFont(), WhisperPopup.WTNormal)
if playerInfo != None:
whisper.setClickable(senderName, fromId, 1)
whisper.manage(base.marginManager)
base.playSfx(sfx)
return
def isLocal(self):
return 1
def canChat(self):
if not self.cr.allowAnyTypedChat():
return 0
if self.commonChatFlags & (ToontownGlobals.CommonChat | ToontownGlobals.SuperChat):
return 1
if base.cr.whiteListChatEnabled:
return 1
for friendId, flags in self.friendsList:
if flags & ToontownGlobals.FriendChat:
return 1
return 0
def startChat(self):
if self.tutorialAck:
self.notify.info('calling LocalAvatar.startchat')
LocalAvatar.LocalAvatar.startChat(self)
self.accept('chatUpdateSCToontask', self.b_setSCToontask)
self.accept('chatUpdateSCResistance', self.d_reqSCResistance)
self.accept('chatUpdateSCSinging', self.b_setSCSinging)
self.accept('whisperUpdateSCToontask', self.whisperSCToontaskTo)
else:
self.notify.info('NOT calling LocalAvatar.startchat, in tutorial')
def stopChat(self):
LocalAvatar.LocalAvatar.stopChat(self)
self.ignore('chatUpdateSCToontask')
self.ignore('chatUpdateSCResistance')
self.ignore('chatUpdateSCSinging')
self.ignore('whisperUpdateSCToontask')
def tunnelIn(self, tunnelOrigin):
self.b_setTunnelIn(self.tunnelX * 0.8, tunnelOrigin)
def tunnelOut(self, tunnelOrigin):
self.tunnelX = self.getX(tunnelOrigin)
tunnelY = self.getY(tunnelOrigin)
self.b_setTunnelOut(self.tunnelX * 0.95, tunnelY, tunnelOrigin)
def handleTunnelIn(self, startTime, endX, x, y, z, h):
self.notify.debug('LocalToon.handleTunnelIn')
tunnelOrigin = render.attachNewNode('tunnelOrigin')
tunnelOrigin.setPosHpr(x, y, z, h, 0, 0)
self.b_setAnimState('run', self.animMultiplier)
self.stopLookAround()
self.reparentTo(render)
self.runSound()
camera.reparentTo(render)
camera.setPosHpr(tunnelOrigin, 0, 20, 12, 180, -20, 0)
base.transitions.irisIn(0.4)
avHeight = max(base.localAvatar.getHeight(), 3.0)
scaleFactor = avHeight * 0.3333333333
toonTrack = self.getTunnelInToonTrack(endX, tunnelOrigin)
toonTrack.append(Func(camera.wrtReparentTo, base.localAvatar))
toonTrack.append(camera.posHprInterval(.8, (0, -9 * scaleFactor, avHeight), (0, 0, 0), other=base.localAvatar, blendType='easeInOut'))
def cleanup(self = self, tunnelOrigin = tunnelOrigin):
self.stopSound()
tunnelOrigin.removeNode()
messenger.send('tunnelInMovieDone')
self.tunnelTrack = Sequence(toonTrack, Func(cleanup))
self.tunnelTrack.start(globalClock.getFrameTime() - startTime)
def handleTunnelOut(self, startTime, startX, startY, x, y, z, h):
self.notify.debug('LocalToon.handleTunnelOut')
tunnelOrigin = render.attachNewNode('tunnelOrigin')
tunnelOrigin.setPosHpr(x, y, z, h, 0, 0)
self.b_setAnimState('run', self.animMultiplier)
self.runSound()
self.stopLookAround()
tracks = Parallel()
camera.wrtReparentTo(render)
startPos = camera.getPos(tunnelOrigin)
startHpr = camera.getHpr(tunnelOrigin)
camLerpDur = 1.0
reducedCamH = fitDestAngle2Src(startHpr[0], 180)
tracks.append(LerpPosHprInterval(camera, camLerpDur, pos=Point3(0, 20, 12), hpr=Point3(reducedCamH, -20, 0), startPos=startPos, startHpr=startHpr, other=tunnelOrigin, blendType='easeInOut', name='tunnelOutLerpCamPos'))
toonTrack = self.getTunnelOutToonTrack(startX, startY, tunnelOrigin)
tracks.append(toonTrack)
irisDur = 0.4
tracks.append(Sequence(Wait(toonTrack.getDuration() - (irisDur + 0.1)), Func(base.transitions.irisOut, irisDur)))
def cleanup(self = self, tunnelOrigin = tunnelOrigin):
self.stopSound()
self.detachNode()
tunnelOrigin.removeNode()
messenger.send('tunnelOutMovieDone')
self.tunnelTrack = Sequence(tracks, Func(cleanup))
self.tunnelTrack.start(globalClock.getFrameTime() - startTime)
def getPieBubble(self):
if self.__pieBubble == None:
bubble = CollisionSphere(0, 0, 0, 1)
node = CollisionNode('pieBubble')
node.addSolid(bubble)
node.setFromCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.CameraBitmask | ToontownGlobals.FloorBitmask)
node.setIntoCollideMask(BitMask32.allOff())
self.__pieBubble = NodePath(node)
self.pieHandler = CollisionHandlerEvent()
self.pieHandler.addInPattern('pieHit')
self.pieHandler.addInPattern('pieHit-%in')
return self.__pieBubble
def __beginTossPieMouse(self, mouseParam):
self.__beginTossPie(globalClock.getFrameTime())
def __endTossPieMouse(self, mouseParam):
self.__endTossPie(globalClock.getFrameTime())
def __beginTossPie(self, time):
if self.tossPieStart != None:
return
if not self.allowPies:
return
if self.hp < 1:
return
if self.numPies == 0:
messenger.send('outOfPies')
return
if self.__pieInHand():
return
if getattr(self.controlManager.currentControls, 'isAirborne', 0):
return
messenger.send('wakeup')
self.localPresentPie(time)
taskName = self.uniqueName('updatePiePower')
taskMgr.add(self.__updatePiePower, taskName)
return
def __endTossPie(self, time):
if self.tossPieStart == None:
return
taskName = self.uniqueName('updatePiePower')
taskMgr.remove(taskName)
messenger.send('wakeup')
power = self.__getPiePower(time)
self.tossPieStart = None
self.localTossPie(power)
return
def localPresentPie(self, time):
import TTEmote
from otp.avatar import Emote
self.__stopPresentPie()
if self.tossTrack:
tossTrack = self.tossTrack
self.tossTrack = None
tossTrack.finish()
self.interruptPie()
self.tossPieStart = time
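# Note: '+' binds tighter than '&', so the next line computes
# (sequence + 1) & 255, wrapping the pie sequence id into a single byte
# for the network update.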
self.__pieSequence = self.__pieSequence + 1 & 255
sequence = self.__pieSequence
self.__presentingPie = 1
pos = self.getPos()
hpr = self.getHpr()
timestamp32 = globalClockDelta.getFrameNetworkTime(bits=32)
self.sendUpdate('presentPie', [pos[0],
pos[1],
pos[2],
hpr[0] % 360.0,
timestamp32])
Emote.globalEmote.disableBody(self)
messenger.send('begin-pie')
ival = self.getPresentPieInterval(pos[0], pos[1], pos[2], hpr[0])
ival = Sequence(ival, name=self.uniqueName('localPresentPie'))
self.tossTrack = ival
ival.start()
self.makePiePowerMeter()
self.__piePowerMeter.show()
self.__piePowerMeterSequence = sequence
self.__piePowerMeter['value'] = 0
return
def __stopPresentPie(self):
if self.__presentingPie:
import TTEmote
from otp.avatar import Emote
Emote.globalEmote.releaseBody(self)
messenger.send('end-pie')
self.__presentingPie = 0
taskName = self.uniqueName('updatePiePower')
taskMgr.remove(taskName)
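# Pie power curve: t grows with hold time (shaped by piePowerExponent), and
# int(t * 100) % 200, folded at 100, yields a repeating 0 -> 100 -> 0
# triangle wave while the button is held.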
def __getPiePower(self, time):
elapsed = max(time - self.tossPieStart, 0.0)
t = elapsed / self.piePowerSpeed
t = math.pow(t, self.piePowerExponent)
power = int(t * 100) % 200
if power > 100:
power = 200 - power
return power
def __updatePiePower(self, task):
if not self.__piePowerMeter:
return Task.done
self.__piePowerMeter['value'] = self.__getPiePower(globalClock.getFrameTime())
return Task.cont
def interruptPie(self):
self.cleanupPieInHand()
self.__stopPresentPie()
if self.__piePowerMeter:
self.__piePowerMeter.hide()
pie = self.pieTracks.get(self.__pieSequence)
if pie and pie.getT() < 14.0 / 24.0:
del self.pieTracks[self.__pieSequence]
pie.pause()
def __pieInHand(self):
pie = self.pieTracks.get(self.__pieSequence)
return pie and pie.getT() < 15.0 / 24.0
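# The 14/24 and 15/24 thresholds above read as frame fractions of the 24 fps
# pie-toss animation: the pie counts as 'in hand' until roughly frame 15.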
def __toonMoved(self, isSet):
if isSet:
self.interruptPie()
def localTossPie(self, power):
if not self.__presentingPie:
return
pos = self.getPos()
hpr = self.getHpr()
timestamp32 = globalClockDelta.getFrameNetworkTime(bits=32)
sequence = self.__pieSequence
if self.tossTrack:
tossTrack = self.tossTrack
self.tossTrack = None
tossTrack.finish()
if self.pieTracks.has_key(sequence):
pieTrack = self.pieTracks[sequence]
del self.pieTracks[sequence]
pieTrack.finish()
if self.splatTracks.has_key(sequence):
splatTrack = self.splatTracks[sequence]
del self.splatTracks[sequence]
splatTrack.finish()
self.makePiePowerMeter()
self.__piePowerMeter['value'] = power
self.__piePowerMeter.show()
self.__piePowerMeterSequence = sequence
pieBubble = self.getPieBubble().instanceTo(NodePath())
def pieFlies(self = self, pos = pos, hpr = hpr, sequence = sequence, power = power, timestamp32 = timestamp32, pieBubble = pieBubble):
self.sendUpdate('tossPie', [pos[0],
pos[1],
pos[2],
hpr[0] % 360.0,
sequence,
power,
self.pieThrowType,
timestamp32])
if self.numPies != ToontownGlobals.FullPies:
self.setNumPies(self.numPies - 1)
base.cTrav.addCollider(pieBubble, self.pieHandler)
toss, pie, flyPie = self.getTossPieInterval(pos[0], pos[1], pos[2], hpr[0], power, self.pieThrowType, beginFlyIval=Func(pieFlies))
pieBubble.reparentTo(flyPie)
flyPie.setTag('pieSequence', str(sequence))
toss = Sequence(toss)
self.tossTrack = toss
toss.start()
pie = Sequence(pie, Func(base.cTrav.removeCollider, pieBubble), Func(self.pieFinishedFlying, sequence))
self.pieTracks[sequence] = pie
pie.start()
return
def pieFinishedFlying(self, sequence):
DistributedToon.DistributedToon.pieFinishedFlying(self, sequence)
if self.__piePowerMeterSequence == sequence:
self.__piePowerMeter.hide()
def __finishPieTrack(self, sequence):
if self.pieTracks.has_key(sequence):
pieTrack = self.pieTracks[sequence]
del self.pieTracks[sequence]
pieTrack.finish()
def __pieHit(self, entry):
if not entry.hasSurfacePoint() or not entry.hasInto():
return
if not entry.getInto().isTangible():
return
sequence = int(entry.getFromNodePath().getNetTag('pieSequence'))
self.__finishPieTrack(sequence)
if self.splatTracks.has_key(sequence):
splatTrack = self.splatTracks[sequence]
del self.splatTracks[sequence]
splatTrack.finish()
pieCode = 0
pieCodeStr = entry.getIntoNodePath().getNetTag('pieCode')
if pieCodeStr:
pieCode = int(pieCodeStr)
pos = entry.getSurfacePoint(render)
timestamp32 = globalClockDelta.getFrameNetworkTime(bits=32)
self.sendUpdate('pieSplat', [pos[0],
pos[1],
pos[2],
sequence,
pieCode,
timestamp32])
splat = self.getPieSplatInterval(pos[0], pos[1], pos[2], pieCode)
splat = Sequence(splat, Func(self.pieFinishedSplatting, sequence))
self.splatTracks[sequence] = splat
splat.start()
messenger.send('pieSplat', [self, pieCode])
messenger.send('localPieSplat', [pieCode, entry])
def beginAllowPies(self):
self.allowPies = 1
self.updatePieButton()
def endAllowPies(self):
self.allowPies = 0
self.updatePieButton()
def makePiePowerMeter(self):
from direct.gui.DirectGui import DirectWaitBar, DGG
if self.__piePowerMeter == None:
self.__piePowerMeter = DirectWaitBar(frameSize=(-0.2,
0.2,
-0.03,
0.03), relief=DGG.SUNKEN, borderWidth=(0.005, 0.005), barColor=(0.4, 0.6, 1.0, 1), pos=(0, 0.1, 0.8))
self.__piePowerMeter.hide()
return
def updatePieButton(self):
from toontown.toonbase import ToontownBattleGlobals
from direct.gui.DirectGui import DirectButton, DGG
wantButton = 0
if self.allowPies and self.numPies > 0:
wantButton = 1
if not launcher.getPhaseComplete(5):
wantButton = 0
haveButton = self.__pieButton != None
if not haveButton and not wantButton:
return
if haveButton and not wantButton:
self.__pieButton.destroy()
self.__pieButton = None
self.__pieButtonType = None
self.__pieButtonCount = None
return
if self.__pieButtonType != self.pieType:
if self.__pieButton:
self.__pieButton.destroy()
self.__pieButton = None
if self.__pieButton == None:
inv = self.inventory
if self.pieType >= len(inv.invModels[ToontownBattleGlobals.THROW_TRACK]):
gui = loader.loadModel('phase_3.5/models/gui/stickerbook_gui')
pieGui = gui.find('**/summons')
pieScale = 0.1
else:
gui = None
pieGui = (inv.invModels[ToontownBattleGlobals.THROW_TRACK][self.pieType],)
pieScale = 0.85
self.__pieButton = DirectButton(image=(inv.upButton, inv.downButton, inv.rolloverButton), geom=pieGui, text='50', text_scale=0.04, text_align=TextNode.ARight, geom_scale=pieScale, geom_pos=(-0.01, 0, 0), text_fg=Vec4(1, 1, 1, 1), text_pos=(0.07, -0.04), relief=None, image_color=(0, 0.6, 1, 1), pos=(0, 0.1, 0.9))
self.__pieButton.bind(DGG.B1PRESS, self.__beginTossPieMouse)
self.__pieButton.bind(DGG.B1RELEASE, self.__endTossPieMouse)
self.__pieButtonType = self.pieType
self.__pieButtonCount = None
if gui:
del gui
if self.__pieButtonCount != self.numPies:
if self.numPies == ToontownGlobals.FullPies:
self.__pieButton['text'] = ''
else:
self.__pieButton['text'] = str(self.numPies)
self.__pieButtonCount = self.numPies
return
def displayWhisper(self, fromId, chatString, whisperType):
sender = None
sfx = self.soundWhisper
if fromId == TTLocalizer.Clarabelle:
chatString = TTLocalizer.Clarabelle + ': ' + chatString
sfx = self.soundPhoneRing
elif fromId != 0:
sender = base.cr.identifyAvatar(fromId)
if whisperType == WhisperPopup.WTNormal or whisperType == WhisperPopup.WTQuickTalker:
if sender == None:
return
chatString = sender.getName() + ': ' + chatString
elif whisperType == WhisperPopup.WTSystem:
sfx = self.soundSystemMessage
whisper = WhisperPopup(chatString, OTPGlobals.getInterfaceFont(), whisperType)
if sender != None:
whisper.setClickable(sender.getName(), fromId)
whisper.manage(base.marginManager)
base.playSfx(sfx)
return
def displaySystemClickableWhisper(self, fromId, chatString, whisperType):
sender = None
sfx = self.soundWhisper
if fromId == TTLocalizer.Clarabelle:
chatString = TTLocalizer.Clarabelle + ': ' + chatString
sfx = self.soundPhoneRing
elif fromId != 0:
sender = base.cr.identifyAvatar(fromId)
if whisperType == WhisperPopup.WTNormal or whisperType == WhisperPopup.WTQuickTalker:
if sender == None:
return
chatString = sender.getName() + ': ' + chatString
elif whisperType == WhisperPopup.WTSystem:
sfx = self.soundSystemMessage
whisper = WhisperPopup(chatString, OTPGlobals.getInterfaceFont(), whisperType)
whisper.setClickable('', fromId)
whisper.manage(base.marginManager)
base.playSfx(sfx)
return
def clickedWhisper(self, doId, isPlayer = None):
if doId > 0:
LocalAvatar.LocalAvatar.clickedWhisper(self, doId, isPlayer)
else:
foundCanStart = False
for partyInfo in self.hostedParties:
if partyInfo.status == PartyGlobals.PartyStatus.CanStart:
foundCanStart = True
break
if base.cr and base.cr.playGame and base.cr.playGame.getPlace() and base.cr.playGame.getPlace().fsm:
fsm = base.cr.playGame.getPlace().fsm
curState = fsm.getCurrentState().getName()
if curState == 'walk':
if hasattr(self, 'eventsPage'):
desiredMode = -1
if doId == -1:
desiredMode = EventsPage.EventsPage_Invited
elif foundCanStart:
desiredMode = EventsPage.EventsPage_Host
if desiredMode >= 0:
self.book.setPage(self.eventsPage)
self.eventsPage.setMode(desiredMode)
fsm.request('stickerBook')
def loadFurnitureGui(self):
if self.__furnitureGui:
return
guiModels = loader.loadModel('phase_5.5/models/gui/house_design_gui')
self.__furnitureGui = DirectFrame(relief=None, parent=base.a2dTopLeft, pos=(0.115, 0.0, -0.66), scale=0.04, image=guiModels.find('**/attic'))
DirectLabel(parent=self.__furnitureGui, relief=None, image=guiModels.find('**/rooftile'))
bMoveStartUp = guiModels.find('**/bu_attic/bu_attic_up')
bMoveStartDown = guiModels.find('**/bu_attic/bu_attic_down')
bMoveStartRollover = guiModels.find('**/bu_attic/bu_attic_rollover')
DirectButton(parent=self.__furnitureGui, relief=None, image=[bMoveStartUp,
bMoveStartDown,
bMoveStartRollover,
bMoveStartUp], text=['', TTLocalizer.HDMoveFurnitureButton, TTLocalizer.HDMoveFurnitureButton], text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), text_font=ToontownGlobals.getInterfaceFont(), pos=(-0.3, 0, 9.4), command=self.__startMoveFurniture)
self.__furnitureGui.hide()
guiModels.removeNode()
return
def showFurnitureGui(self):
self.loadFurnitureGui()
self.__furnitureGui.show()
def hideFurnitureGui(self):
if self.__furnitureGui:
self.__furnitureGui.hide()
def clarabelleNewsPageCollision(self, show = True):
if self.__clarabelleButton == None:
return
claraXPos = ClaraBaseXPos
#notifyXPos = CatalogNotifyDialog.CatalogNotifyBaseXPos
if show:
claraXPos += AdjustmentForNewsButton
#notifyXPos += AdjustmentForNewsButton
newPos = (claraXPos, 1.0, -0.63)
self.__clarabelleButton.setPos(newPos)
#if self.__catalogNotifyDialog == None or self.__catalogNotifyDialog.frame == None:
#return
#notifyPos = self.__catalogNotifyDialog.frame.getPos()
#notifyPos[0] = notifyXPos
#self.__catalogNotifyDialog.frame.setPos(notifyPos)
return
def loadClarabelleGui(self):
if self.__clarabelleButton:
return
guiItems = loader.loadModel('phase_5.5/models/gui/catalog_gui')
circle = guiItems.find('**/cover/blue_circle')
icon = guiItems.find('**/cover/clarabelle')
icon.reparentTo(circle)
rgba = VBase4(0.71589, 0.784547, 0.974, 1.0)
white = VBase4(1.0, 1.0, 1.0, 1.0)
icon.setColor(white)
claraXPos = ClaraBaseXPos
newScale = oldScale = 0.5
newPos = (claraXPos, 1.0, -0.63)
if WantNewsPage:
claraXPos += AdjustmentForNewsButton
oldPos = (claraXPos, 1.0, -0.63)
newScale = oldScale * ToontownGlobals.NewsPageScaleAdjust
newPos = (claraXPos - 0.1, 1.0, -0.63)
self.__clarabelleButton = DirectButton(relief=None, image=circle, text='', text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), text_scale=0.1, text_pos=(-1.06, 1.06), text_font=ToontownGlobals.getInterfaceFont(), pos=newPos, scale=newScale, command=self.__handleClarabelleButton)
self.__clarabelleButton.reparentTo(base.a2dTopRight, DGG.BACKGROUND_SORT_INDEX - 1)
button = self.__clarabelleButton.stateNodePath[0]
self.__clarabelleFlash = Sequence(LerpColorInterval(button, 2, white, blendType='easeInOut'), LerpColorInterval(button, 2, rgba, blendType='easeInOut'))
self.__clarabelleFlash.loop()
self.__clarabelleFlash.pause()
return
def showClarabelleGui(self, mailboxItems):
self.loadClarabelleGui()
if mailboxItems:
self.__clarabelleButton['text'] = ['', TTLocalizer.CatalogNewDeliveryButton, TTLocalizer.CatalogNewDeliveryButton]
else:
self.__clarabelleButton['text'] = ['', TTLocalizer.CatalogNewCatalogButton, TTLocalizer.CatalogNewCatalogButton]
if not self.mailboxNotify and not self.awardNotify and self.catalogNotify == ToontownGlobals.OldItems and (self.simpleMailNotify != ToontownGlobals.NoItems or self.inviteMailNotify != ToontownGlobals.NoItems):
self.__clarabelleButton['text'] = ['', TTLocalizer.MailNewMailButton, TTLocalizer.MailNewMailButton]
if self.newsButtonMgr.isNewIssueButtonShown() and WantNewsPage:
self.clarabelleNewsPageCollision(True)
self.__clarabelleButton.show()
self.__clarabelleFlash.resume()
def hideClarabelleGui(self):
if self.__clarabelleButton:
self.__clarabelleButton.hide()
self.__clarabelleFlash.pause()
def __handleClarabelleButton(self):
self.stopMoveFurniture()
place = base.cr.playGame.getPlace()
if place == None:
self.notify.warning('Tried to go home, but place is None.')
return
if self.__catalogNotifyDialog:
self.__catalogNotifyDialog.cleanup()
self.__catalogNotifyDialog = None
if config.GetBool('want-qa-regression', 0):
self.notify.info('QA-REGRESSION: VISITESTATE: Visit estate')
place.goHomeNow(self.lastHood)
return
def __startMoveFurniture(self):
self.oldPos = self.getPos()
if config.GetBool('want-qa-regression', 0):
self.notify.info('QA-REGRESSION: ESTATE: Furniture Placement')
if self.cr.furnitureManager != None:
self.cr.furnitureManager.d_suggestDirector(self.doId)
elif self.furnitureManager != None:
self.furnitureManager.d_suggestDirector(self.doId)
return
def stopMoveFurniture(self):
base.localAvatar.controlManager.collisionsOn()
if self.oldPos:
self.setPos(self.oldPos)
if self.furnitureManager != None:
self.furnitureManager.d_suggestDirector(0)
return
def setFurnitureDirector(self, avId, furnitureManager):
if avId == 0:
if self.furnitureManager == furnitureManager:
messenger.send('exitFurnitureMode', [furnitureManager])
self.furnitureManager = None
self.furnitureDirector = None
elif avId != self.doId:
if self.furnitureManager == None or self.furnitureDirector != avId:
self.furnitureManager = furnitureManager
self.furnitureDirector = avId
messenger.send('enterFurnitureMode', [furnitureManager, 0])
else:
if self.furnitureManager != None:
messenger.send('exitFurnitureMode', [self.furnitureManager])
self.furnitureManager = None
self.furnitureManager = furnitureManager
self.furnitureDirector = avId
messenger.send('enterFurnitureMode', [furnitureManager, 1])
self.refreshOnscreenButtons()
return
def getAvPosStr(self):
pos = self.getPos()
hpr = self.getHpr()
serverVersion = base.cr.getServerVersion()
districtName = base.cr.getShardName(base.localAvatar.defaultShard)
if hasattr(base.cr.playGame.hood, 'loader') and hasattr(base.cr.playGame.hood.loader, 'place') and base.cr.playGame.getPlace() != None:
zoneId = base.cr.playGame.getPlace().getZoneId()
else:
zoneId = '?'
strPosCoordText = 'X: %.3f' % pos[0] + ', Y: %.3f' % pos[1] + '\nZ: %.3f' % pos[2] + ', H: %.3f' % hpr[0] + '\nZone: %s' % str(zoneId) + ', Ver: %s, ' % serverVersion + 'District: %s' % districtName
return strPosCoordText
def thinkPos(self):
pos = self.getPos()
hpr = self.getHpr()
serverVersion = base.cr.getServerVersion()
districtName = base.cr.getShardName(base.localAvatar.defaultShard)
if hasattr(base.cr.playGame.hood, 'loader') and hasattr(base.cr.playGame.hood.loader, 'place') and base.cr.playGame.getPlace() != None:
zoneId = base.cr.playGame.getPlace().getZoneId()
else:
zoneId = '?'
strPos = '(%.3f' % pos[0] + '\n %.3f' % pos[1] + '\n %.3f)' % pos[2] + '\nH: %.3f' % hpr[0] + '\nZone: %s' % str(zoneId) + ',\nVer: %s, ' % serverVersion + '\nDistrict: %s' % districtName
print 'Current position=', strPos.replace('\n', ', ')
self.setChatAbsolute(strPos, CFThought | CFTimeout)
return
def __placeMarker(self):
pos = self.getPos()
hpr = self.getHpr()
chest = loader.loadModel('phase_4/models/props/coffin')
chest.reparentTo(render)
chest.setColor(1, 0, 0, 1)
chest.setPosHpr(pos, hpr)
chest.setScale(0.5)
def setFriendsListButtonActive(self, active):
self.friendsListButtonActive = active
self.refreshOnscreenButtons()
def obscureFriendsListButton(self, increment):
self.friendsListButtonObscured += increment
self.refreshOnscreenButtons()
def obscureMoveFurnitureButton(self, increment):
self.moveFurnitureButtonObscured += increment
self.refreshOnscreenButtons()
def obscureClarabelleButton(self, increment):
self.clarabelleButtonObscured += increment
self.refreshOnscreenButtons()
def refreshOnscreenButtons(self):
self.bFriendsList.hide()
self.hideFurnitureGui()
self.hideClarabelleGui()
clarabelleHidden = 1
self.ignore(ToontownGlobals.FriendsListHotkey)
if self.friendsListButtonActive and self.friendsListButtonObscured <= 0:
self.bFriendsList.show()
self.accept(ToontownGlobals.FriendsListHotkey, self.sendFriendsListEvent)
if self.clarabelleButtonObscured <= 0 and self.isTeleportAllowed():
if self.catalogNotify == ToontownGlobals.NewItems or self.mailboxNotify == ToontownGlobals.NewItems or self.simpleMailNotify == ToontownGlobals.NewItems or self.inviteMailNotify == ToontownGlobals.NewItems or self.awardNotify == ToontownGlobals.NewItems:
showClarabelle = not launcher or launcher.getPhaseComplete(5.5)
for quest in self.quests:
if quest[0] in Quests.PreClarabelleQuestIds and self.mailboxNotify != ToontownGlobals.NewItems and self.awardNotify != ToontownGlobals.NewItems:
showClarabelle = 0
if base.cr.playGame.getPlace().getState() == 'stickerBook':
showClarabelle = 0
if showClarabelle:
newItemsInMailbox = self.mailboxNotify == ToontownGlobals.NewItems or self.awardNotify == ToontownGlobals.NewItems
self.showClarabelleGui(newItemsInMailbox)
clarabelleHidden = 0
if clarabelleHidden:
if self.__catalogNotifyDialog:
self.__catalogNotifyDialog.cleanup()
self.__catalogNotifyDialog = None
else:
self.newCatalogNotify()
if self.moveFurnitureButtonObscured <= 0:
if self.furnitureManager != None and self.furnitureDirector == self.doId:
self.loadFurnitureGui()
self.__furnitureGui.setPos(0.155, -0.6, -1.045)
self.__furnitureGui.setScale(0.06)
elif self.cr.furnitureManager != None:
self.showFurnitureGui()
if self.__lerpFurnitureButton:
self.__lerpFurnitureButton.finish()
self.__lerpFurnitureButton = self.__furnitureGui.posHprScaleInterval(1.0, pos=Point3(0.115, 0.0, -0.66), hpr=Vec3(0.0, 0.0, 0.0), scale=Vec3(0.04, 0.04, 0.04), blendType='easeInOut', name='lerpFurnitureButton')
self.__lerpFurnitureButton.start()
if hasattr(self, 'inEstate') and self.inEstate:
self.loadGardeningGui()
self.hideGardeningGui()
else:
self.hideGardeningGui()
return
def setGhostMode(self, flag):
if flag == 2:
self.seeGhosts = 1
DistributedToon.DistributedToon.setGhostMode(self, flag)
def newCatalogNotify(self):
if not self.gotCatalogNotify:
return
hasPhase = not launcher or launcher.getPhaseComplete(5.5)
if not hasPhase:
return
if not self.friendsListButtonActive or self.friendsListButtonObscured > 0:
return
self.gotCatalogNotify = 0
currentWeek = self.catalogScheduleCurrentWeek - 1
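# currentWeek is zero-based: weeks 0-56 derive series/week directly from
# CatalogNumWeeksPerSeries, weeks 57-64 are hard-coded as series 6, and later
# weeks use an extra offset (+2 rather than +1) to step past that series.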
if currentWeek < 57:
seriesNumber = currentWeek / ToontownGlobals.CatalogNumWeeksPerSeries + 1
weekNumber = currentWeek % ToontownGlobals.CatalogNumWeeksPerSeries + 1
elif currentWeek < 65:
seriesNumber = 6
weekNumber = currentWeek - 56
else:
seriesNumber = currentWeek / ToontownGlobals.CatalogNumWeeksPerSeries + 2
weekNumber = currentWeek % ToontownGlobals.CatalogNumWeeksPerSeries + 1
message = None
if self.mailboxNotify == ToontownGlobals.NoItems:
if self.catalogNotify == ToontownGlobals.NewItems:
if self.catalogScheduleCurrentWeek == 1:
message = (TTLocalizer.CatalogNotifyFirstCatalog, TTLocalizer.CatalogNotifyInstructions)
else:
message = (TTLocalizer.CatalogNotifyNewCatalog % weekNumber,)
elif self.mailboxNotify == ToontownGlobals.NewItems:
if self.catalogNotify == ToontownGlobals.NewItems:
message = (TTLocalizer.CatalogNotifyNewCatalogNewDelivery % weekNumber,)
else:
message = (TTLocalizer.CatalogNotifyNewDelivery,)
elif self.mailboxNotify == ToontownGlobals.OldItems:
if self.catalogNotify == ToontownGlobals.NewItems:
message = (TTLocalizer.CatalogNotifyNewCatalogOldDelivery % weekNumber,)
else:
message = (TTLocalizer.CatalogNotifyOldDelivery,)
if self.awardNotify == ToontownGlobals.NoItems:
pass
elif self.awardNotify == ToontownGlobals.NewItems:
oldStr = ''
if message:
oldStr = message[0] + ' '
oldStr += TTLocalizer.AwardNotifyNewItems
message = (oldStr,)
elif self.awardNotify == ToontownGlobals.OldItems:
oldStr = ''
if message:
oldStr = message[0] + ' '
oldStr += TTLocalizer.AwardNotifyOldItems
message = (oldStr,)
if self.simpleMailNotify == ToontownGlobals.NewItems or self.inviteMailNotify == ToontownGlobals.NewItems:
oldStr = ''
if message:
oldStr = message[0] + ' '
oldStr += TTLocalizer.MailNotifyNewItems
message = (oldStr,)
if message == None:
return
#if self.__catalogNotifyDialog:
#self.__catalogNotifyDialog.cleanup()
#self.__catalogNotifyDialog = CatalogNotifyDialog.CatalogNotifyDialog(message)
#base.playSfx(self.soundPhoneRing)
return
def allowHardLand(self):
retval = LocalAvatar.LocalAvatar.allowHardLand(self)
return retval
def setShovelGuiLevel(self, level = 0):
pass
def setWateringCanGuiLevel(self, level = 0):
pass
def loadGardeningGui(self):
if self.__gardeningGui:
return
gardenGuiCard = loader.loadModel('phase_5.5/models/gui/planting_gui')
self.__gardeningGui = DirectFrame(relief=None, geom=gardenGuiCard, geom_color=GlobalDialogColor, geom_scale=(0.17, 1.0, 0.3), pos=(-1.2, 0, 0.5), scale=1.0)
self.__gardeningGui.setName('gardeningFrame')
self.__gardeningGuiFake = DirectFrame(relief=None, geom=None, geom_color=GlobalDialogColor, geom_scale=(0.17, 1.0, 0.3), pos=(-1.2, 0, 0.5), scale=1.0)
self.__gardeningGuiFake.setName('gardeningFrameFake')
iconScale = 1
iconColorWhite = Vec4(1.0, 1.0, 1.0, 1.0)
iconColorGrey = Vec4(0.7, 0.7, 0.7, 1.0)
iconColorBrown = Vec4(0.7, 0.4, 0.3, 1.0)
iconColorBlue = Vec4(0.2, 0.3, 1.0, 1.0)
shovelCardP = loader.loadModel('phase_5.5/models/gui/planting_but_shovel_P')
shovelCardY = loader.loadModel('phase_5.5/models/gui/planting_but_shovel_Y')
wateringCanCardP = loader.loadModel('phase_5.5/models/gui/planting_but_can_P')
wateringCanCardY = loader.loadModel('phase_5.5/models/gui/planting_but_can_Y')
backCard = loader.loadModel('phase_5.5/models/gui/planting_gui')
iconImage = None
iconModels = loader.loadModel('phase_3.5/models/gui/sos_textures')
iconGeom = iconModels.find('**/fish')
buttonText = TTLocalizer.GardeningPlant
self.shovelText = ('',
'',
buttonText,
'')
self.__shovelButtonFake = DirectLabel(parent=self.__gardeningGuiFake, relief=None, text=self.shovelText, text_align=TextNode.ALeft, text_pos=(0.0, -0.0), text_scale=0.07, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), image_scale=(0.18, 1.0, 0.36), geom=None, geom_scale=iconScale, geom_color=iconColorWhite, pos=(0.15, 0, 0.2), scale=0.775)
self.shovelButtonFake = self.__shovelButtonFake
self.shovelText = ('',
'',
buttonText,
'')
self.__shovelButton = DirectButton(parent=self.__gardeningGui, relief=None, text=self.shovelText, text_align=TextNode.ACenter, text_pos=(0.0, -0.0), text_scale=0.1, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), image=(shovelCardP,
shovelCardY,
shovelCardY,
shovelCardY), image_scale=(0.18, 1.0, 0.36), geom=None, geom_scale=iconScale, geom_color=iconColorWhite, pos=(0, 0, 0.2), scale=0.775, command=self.__shovelButtonClicked)
self.shovelButton = self.__shovelButton
iconGeom = iconModels.find('**/teleportIcon')
buttonText = TTLocalizer.GardeningWater
self.waterText = (buttonText,
buttonText,
buttonText,
'')
self.__wateringCanButtonFake = DirectLabel(parent=self.__gardeningGuiFake, relief=None, text=self.waterText, text_align=TextNode.ALeft, text_pos=(0.0, -0.0), text_scale=0.07, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), image_scale=(0.18, 1.0, 0.36), geom=None, geom_scale=iconScale, geom_color=iconColorWhite, pos=(0.15, 0, 0.01), scale=0.775)
self.wateringCanButtonFake = self.__wateringCanButtonFake
self.__wateringCanButton = DirectButton(parent=self.__gardeningGui, relief=None, text=self.waterText, text_align=TextNode.ACenter, text_pos=(0.0, -0.0), text_scale=0.1, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), image=(wateringCanCardP,
wateringCanCardY,
wateringCanCardY,
wateringCanCardY), image_scale=(0.18, 1.0, 0.36), geom=None, geom_scale=iconScale, geom_color=iconColorWhite, pos=(0, 0, 0.01), scale=0.775, command=self.__wateringCanButtonClicked)
self.wateringCanButton = self.__wateringCanButton
self.basketText = '%s / %s' % (self.numFlowers, self.maxFlowerBasket)
self.basketButton = DirectLabel(parent=self.__gardeningGui, relief=None, text=self.basketText, text_align=TextNode.ALeft, text_pos=(0.82, -1.4), text_scale=0.2, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), image=None, image_scale=iconScale, geom=None, geom_scale=iconScale, geom_color=iconColorWhite, pos=(-0.34, 0, 0.16), scale=0.3, textMayChange=1)
if hasattr(self, 'shovel'):
self.setShovelGuiLevel(self.shovel)
if hasattr(self, 'wateringCan'):
self.setWateringCanGuiLevel(self.wateringCan)
self.__shovelButton.hide()
self.__wateringCanButton.hide()
self.__shovelButtonFake.hide()
self.__wateringCanButtonFake.hide()
return
def changeButtonText(self, button, text):
button['text'] = text
def resetWaterText(self):
self.wateringCanButton['text'] = self.waterText
def resetShovelText(self):
self.shovelButton['text'] = self.holdShovelText
def showGardeningGui(self):
self.loadGardeningGui()
self.__gardeningGui.show()
base.setCellsAvailable([base.leftCells[2]], 0)
def hideGardeningGui(self):
if self.__gardeningGui:
self.__gardeningGui.hide()
base.setCellsAvailable([base.leftCells[2]], 1)
def showShovelButton(self, add = 0):
if add:
self.shovelButtonActiveCount += add
else:
self.showingShovel = 1
self.notify.debug('showing shovel %s' % self.shovelButtonActiveCount)
self.__gardeningGui.show()
self.__shovelButton.show()
def hideShovelButton(self, deduct = 0):
self.shovelButtonActiveCount -= deduct
if deduct == 0:
self.showingShovel = 0
if self.shovelButtonActiveCount < 1:
self.shovelButtonActiveCount = 0
if self.showingShovel == 0:
self.__shovelButton.hide()
self.handleAllGardeningButtonsHidden()
self.notify.debug('hiding shovel %s' % self.shovelButtonActiveCount)
def showWateringCanButton(self, add = 0):
if add:
self.wateringCanButtonActiveCount += add
else:
self.showingWateringCan = 1
self.__gardeningGui.show()
self.__wateringCanButton.show()
self.basketButton.show()
def hideWateringCanButton(self, deduct = 0):
self.wateringCanButtonActiveCount -= deduct
if deduct == 0:
self.showingWateringCan = 0
if self.wateringCanButtonActiveCount < 1:
self.wateringCanButtonActiveCount = 0
if self.showingWateringCan == 0:
self.__wateringCanButton.hide()
self.handleAllGardeningButtonsHidden()
def showWateringCanButtonFake(self, add = 0):
self.__wateringCanButtonFake.show()
def hideWateringCanButtonFake(self, deduct = 0):
self.__wateringCanButtonFake.hide()
def showShovelButtonFake(self, add = 0):
self.__shovelButtonFake.show()
def hideShovelButtonFake(self, deduct = 0):
self.__shovelButtonFake.hide()
def levelWater(self, change = 1):
if change < 0:
return
self.showWateringCanButtonFake(1)
if change < 1:
changeString = TTLocalizer.GardeningNoSkill
else:
changeString = '+%s %s' % (change, TTLocalizer.GardeningWaterSkill)
self.waterTrack = Sequence(Wait(0.0), Func(self.changeButtonText, self.wateringCanButtonFake, changeString), SoundInterval(globalBattleSoundCache.getSound('GUI_balloon_popup.ogg'), node=self), Wait(1.0), Func(self.hideWateringCanButtonFake, 1))
self.waterTrack.start()
def levelShovel(self, change = 1):
if change < 0:
return
self.showShovelButtonFake(1)
if change < 1:
changeString = TTLocalizer.GardeningNoSkill
else:
changeString = '+%s %s' % (change, TTLocalizer.GardeningShovelSkill)
plant = base.cr.doId2do.get(self.shovelRelatedDoId)
if plant:
self.holdShovelText = plant.getShovelAction()
self.shovelTrack = Sequence(Wait(0.0), Func(self.changeButtonText, self.shovelButtonFake, changeString), SoundInterval(globalBattleSoundCache.getSound('GUI_balloon_popup.ogg'), node=self), Wait(1.0), Func(self.hideShovelButtonFake, 1))
self.shovelTrack.start()
def setGuiConflict(self, con):
self.guiConflict = con
def getGuiConflict(self):
return self.guiConflict
def verboseState(self):
self.lastPlaceState = 'None'
taskMgr.add(self.__expressState, 'expressState', extraArgs=[])
def __expressState(self, task = None):
place = base.cr.playGame.getPlace()
if place:
state = place.fsm.getCurrentState()
if state.getName() != self.lastPlaceState:
print 'Place State Change From %s to %s' % (self.lastPlaceState, state.getName())
self.lastPlaceState = state.getName()
return Task.cont
def addShovelRelatedDoId(self, doId):
if hasattr(base.cr.playGame.getPlace(), 'detectedGardenPlotDone'):
place = base.cr.playGame.getPlace()
state = place.fsm.getCurrentState()
if state.getName() == 'stopped':
return
self.touchingPlantList.append(doId)
self.autoSetActivePlot()
def removeShovelRelatedDoId(self, doId):
if doId in self.touchingPlantList:
self.touchingPlantList.remove(doId)
self.autoSetActivePlot()
def autoSetActivePlot(self):
if self.guiConflict:
return
if len(self.touchingPlantList) > 0:
minDist = 10000
minDistPlot = 0
for plot in self.touchingPlantList:
plant = base.cr.doId2do.get(plot)
if plant:
if self.getDistance(plant) < minDist:
minDist = self.getDistance(plant)
minDistPlot = plot
else:
self.touchingPlantList.remove(plot)
if len(self.touchingPlantList) == 0:
self.setActivePlot(None)
else:
self.setActivePlot(minDistPlot)
else:
self.setActivePlot(None)
return
def setActivePlot(self, doId):
if not self.gardenStarted:
return
self.shovelRelatedDoId = doId
plant = base.cr.doId2do.get(doId)
if plant:
self.startStareAt(plant, Point3(0, 0, 1))
self.__shovelButton['state'] = DGG.NORMAL
if not plant.canBePicked():
self.hideShovelButton()
else:
self.showShovelButton()
self.setShovelAbility(TTLocalizer.GardeningPlant)
if plant.getShovelAction():
self.setShovelAbility(plant.getShovelAction())
if plant.getShovelAction() == TTLocalizer.GardeningPick:
if not plant.unlockPick():
self.__shovelButton['state'] = DGG.DISABLED
self.setShovelAbility(TTLocalizer.GardeningFull)
self.notify.debug('self.shovelRelatedDoId = %d' % self.shovelRelatedDoId)
if plant.getShovelCommand():
self.extraShovelCommand = plant.getShovelCommand()
self.__shovelButton['command'] = self.__shovelButtonClicked
if plant.canBeWatered():
self.showWateringCanButton()
else:
self.hideWateringCanButton()
else:
self.stopStareAt()
self.shovelRelatedDoId = 0
if self.__shovelButton:
self.__shovelButton['command'] = None
self.hideShovelButton()
self.hideWateringCanButton()
self.handleAllGardeningButtonsHidden()
if not self.inGardenAction:
if hasattr(base.cr.playGame.getPlace(), 'detectedGardenPlotDone'):
place = base.cr.playGame.getPlace()
if place:
place.detectedGardenPlotDone()
return
def setPlantToWater(self, plantId):
if self.plantToWater == None:
self.plantToWater = plantId
self.notify.debug('setting plant to water %s' % plantId)
return
def clearPlantToWater(self, plantId):
if not hasattr(self, 'secondaryWaterPlant'):
self.secondaryWaterPlant = None
if self.plantToWater == plantId:
self.plantToWater = None
self.hideWateringCanButton()
return
def hasPlant(self):
if self.plantToWater != None:
return 1
else:
return 0
def handleAllGardeningButtonsHidden(self):
somethingVisible = False
if not self.__shovelButton.isHidden():
somethingVisible = True
if not self.__wateringCanButton.isHidden():
somethingVisible = True
if not somethingVisible:
self.hideGardeningGui()
def setShovelAbility(self, ability):
self.shovelAbility = ability
if self.__shovelButton:
self.__shovelButton['text'] = ability
def setFlowerBasket(self, speciesList, varietyList):
DistributedToon.DistributedToon.setFlowerBasket(self, speciesList, varietyList)
self.numFlowers = len(self.flowerBasket.flowerList)
if hasattr(self, 'basketButton'):
self.basketText = '%s / %s' % (self.numFlowers, self.maxFlowerBasket)
self.basketButton['text'] = self.basketText
def setShovelSkill(self, skillLevel):
if hasattr(self, 'shovelSkill') and hasattr(self, 'shovelButton'):
if self.shovelSkill != None:
self.levelShovel(skillLevel - self.shovelSkill)
oldShovelSkill = self.shovelSkill
DistributedToon.DistributedToon.setShovelSkill(self, skillLevel)
if hasattr(self, 'shovel'):
oldShovelPower = GardenGlobals.getShovelPower(self.shovel, oldShovelSkill)
newShovelPower = GardenGlobals.getShovelPower(self.shovel, self.shovelSkill)
almostMaxedSkill = GardenGlobals.ShovelAttributes[GardenGlobals.MAX_SHOVELS - 1]['skillPts'] - 2
if skillLevel >= GardenGlobals.ShovelAttributes[self.shovel]['skillPts']:
self.promoteShovel()
elif oldShovelSkill and oldShovelPower < newShovelPower:
self.promoteShovelSkill(self.shovel, self.shovelSkill)
elif oldShovelSkill == almostMaxedSkill and newShovelPower == GardenGlobals.getNumberOfShovelBoxes():
self.promoteShovelSkill(self.shovel, self.shovelSkill)
return
def setWateringCanSkill(self, skillLevel):
skillDelta = skillLevel - self.wateringCanSkill
if skillDelta or 1:  # always truthy as written; the delta guard is vestigial
if hasattr(self, 'wateringCanSkill') and hasattr(self, 'wateringCanButton'):
if self.wateringCanSkill != None:
self.levelWater(skillDelta)
DistributedToon.DistributedToon.setWateringCanSkill(self, skillLevel)
if hasattr(self, 'wateringCan'):
if skillLevel >= GardenGlobals.WateringCanAttributes[self.wateringCan]['skillPts']:
self.promoteWateringCan()
return
def unlockGardeningButtons(self, task = None):
if hasattr(self, '_LocalToon__shovelButton'):
try:
self.__shovelButton['state'] = DGG.NORMAL
except TypeError:
self.notify.warning('Could not unlock the shovel button- Type Error')
if hasattr(self, '_LocalToon__wateringCanButton'):
try:
self.__wateringCanButton['state'] = DGG.NORMAL
except TypeError:
self.notify.warning('Could not unlock the watering can button - Type Error')
taskMgr.remove('unlockGardenButtons')
return None
def lockGardeningButtons(self, task = None):
if hasattr(self, '_LocalToon__shovelButton'):
try:
self.__shovelButton['state'] = DGG.DISABLED
except TypeError:
self.notify.warning('Could not lock the shovel button- Type Error')
if hasattr(self, '_LocalToon__wateringCanButton'):
try:
self.__wateringCanButton['state'] = DGG.DISABLED
except TypeError:
self.notify.warning('Could not lock the watering can button - Type Error')
self.accept('endPlantInteraction', self.__handleEndPlantInteraction)
return None
def reactivateShovel(self, task = None):
if hasattr(self, '_LocalToon__shovelButton'):
self.__shovelButton['state'] = DGG.NORMAL
taskMgr.remove('reactShovel')
return None
def reactivateWater(self, task = None):
if hasattr(self, '_LocalToon__wateringCanButton'):
self.__wateringCanButton['state'] = DGG.NORMAL
taskMgr.remove('reactWater')
return None
def handleEndPlantInteraction(self, object = None, replacement = 0):
if not replacement:
self.setInGardenAction(None, object)
self.autoSetActivePlot()
return
def __handleEndPlantInteraction(self, task = None):
self.setInGardenAction(None)
self.autoSetActivePlot()
return
def promoteShovelSkill(self, shovelLevel, shovelSkill):
shovelName = GardenGlobals.ShovelAttributes[shovelLevel]['name']
shovelBeans = GardenGlobals.getShovelPower(shovelLevel, shovelSkill)
oldShovelBeans = GardenGlobals.getShovelPower(shovelLevel, shovelSkill - 1)
doPartyBall = False
message = TTLocalizer.GardenShovelSkillLevelUp % {'shovel': shovelName,
'oldbeans': oldShovelBeans,
'newbeans': shovelBeans}
if shovelBeans == GardenGlobals.getNumberOfShovelBoxes():
if shovelSkill == GardenGlobals.ShovelAttributes[shovelLevel]['skillPts'] - 1:
doPartyBall = True
message = TTLocalizer.GardenShovelSkillMaxed % {'shovel': shovelName,
'oldbeans': oldShovelBeans,
'newbeans': shovelBeans}
messagePos = Vec2(0, 0.2)
messageScale = 0.07
image = loader.loadModel('phase_5.5/models/gui/planting_but_shovel_P')
imagePos = Vec3(0, 0, -0.13)
imageScale = Vec3(0.28, 0, 0.56)
if doPartyBall:
go = Fanfare.makeFanfareWithMessageImage(0, base.localAvatar, 1, message, Vec2(0, 0.2), 0.08, image, Vec3(0, 0, -0.1), Vec3(0.35, 0, 0.7), wordwrap=23)
Sequence(go[0], Func(go[1].show), LerpColorScaleInterval(go[1], duration=0.5, startColorScale=Vec4(1, 1, 1, 0), colorScale=Vec4(1, 1, 1, 1)), Wait(10), LerpColorScaleInterval(go[1], duration=0.5, startColorScale=Vec4(1, 1, 1, 1), colorScale=Vec4(1, 1, 1, 0)), Func(go[1].remove)).start()
else:
go = Fanfare.makePanel(base.localAvatar, 1)
Fanfare.makeMessageBox(go, message, messagePos, messageScale, wordwrap=24)
Fanfare.makeImageBox(go.itemFrame, image, imagePos, imageScale)
Sequence(Func(go.show), LerpColorScaleInterval(go, duration=0.5, startColorScale=Vec4(1, 1, 1, 0), colorScale=Vec4(1, 1, 1, 1)), Wait(10), LerpColorScaleInterval(go, duration=0.5, startColorScale=Vec4(1, 1, 1, 1), colorScale=Vec4(1, 1, 1, 0)), Func(go.remove)).start()
def promoteShovel(self, shovelLevel = 0):
shovelName = GardenGlobals.ShovelAttributes[shovelLevel]['name']
shovelBeans = GardenGlobals.getShovelPower(shovelLevel, 0)
message = TTLocalizer.GardenShovelLevelUp % {'shovel': shovelName,
'oldbeans': shovelBeans - 1,
'newbeans': shovelBeans}
messagePos = Vec2(0, 0.2)
messageScale = 0.07
image = loader.loadModel('phase_5.5/models/gui/planting_but_shovel_P')
imagePos = Vec3(0, 0, -0.13)
imageScale = Vec3(0.28, 0, 0.56)
go = Fanfare.makePanel(base.localAvatar, 1)
Fanfare.makeMessageBox(go, message, messagePos, messageScale, wordwrap=24)
Fanfare.makeImageBox(go.itemFrame, image, imagePos, imageScale)
Sequence(Func(go.show), LerpColorScaleInterval(go, duration=0.5, startColorScale=Vec4(1, 1, 1, 0), colorScale=Vec4(1, 1, 1, 1)), Wait(10), LerpColorScaleInterval(go, duration=0.5, startColorScale=Vec4(1, 1, 1, 1), colorScale=Vec4(1, 1, 1, 0)), Func(go.remove)).start()
def promoteWateringCan(self, wateringCanlevel = 0):
message = TTLocalizer.GardenWateringCanLevelUp + ' \n' + GardenGlobals.WateringCanAttributes[wateringCanlevel]['name']
messagePos = Vec2(0, 0.2)
messageScale = 0.08
image = loader.loadModel('phase_5.5/models/gui/planting_but_can_P')
imagePos = Vec3(0, 0, -0.1)
imageScale = Vec3(0.35, 0, 0.7)
if wateringCanlevel >= GardenGlobals.MAX_WATERING_CANS - 1:
go = Fanfare.makeFanfareWithMessageImage(0, base.localAvatar, 1, message, Vec2(0, 0.2), 0.08, image, Vec3(0, 0, -0.1), Vec3(0.35, 0, 0.7))
Sequence(go[0], Func(go[1].show), LerpColorScaleInterval(go[1], duration=0.5, startColorScale=Vec4(1, 1, 1, 0), colorScale=Vec4(1, 1, 1, 1)), Wait(5), LerpColorScaleInterval(go[1], duration=0.5, startColorScale=Vec4(1, 1, 1, 1), colorScale=Vec4(1, 1, 1, 0)), Func(go[1].remove)).start()
else:
go = Fanfare.makePanel(base.localAvatar, 1)
Fanfare.makeMessageBox(go, message, messagePos, messageScale)
Fanfare.makeImageBox(go.itemFrame, image, imagePos, imageScale)
Sequence(Func(go.show), LerpColorScaleInterval(go, duration=0.5, startColorScale=Vec4(1, 1, 1, 0), colorScale=Vec4(1, 1, 1, 1)), Wait(5), LerpColorScaleInterval(go, duration=0.5, startColorScale=Vec4(1, 1, 1, 1), colorScale=Vec4(1, 1, 1, 0)), Func(go.remove)).start()
def setInGardenAction(self, actionObject, fromObject = None):
if actionObject:
self.lockGardeningButtons()
elif fromObject:
self.unlockGardeningButtons()
else:
self.unlockGardeningButtons()
self.inGardenAction = actionObject
def __wateringCanButtonClicked(self):
self.notify.debug('wateringCanButtonClicked')
if self.inGardenAction:
return
plant = base.cr.doId2do.get(self.shovelRelatedDoId)
if plant:
if hasattr(plant, 'handleWatering'):
plant.handleWatering()
messenger.send('wakeup')
def __shovelButtonClicked(self):
if self.inGardenAction:
return
self.notify.debug('shovelButtonClicked')
messenger.send('wakeup')
thingId = self.shovelRelatedDoId
thing = base.cr.doId2do.get(thingId)
if hasattr(self, 'extraShovelCommand'):
self.extraShovelCommand()
self.setActivePlot(thingId)
def setShovel(self, shovelId):
DistributedToon.DistributedToon.setShovel(self, shovelId)
if self.__gardeningGui:
self.setShovelGuiLevel(shovelId)
def setWateringCan(self, wateringCanId):
DistributedToon.DistributedToon.setWateringCan(self, wateringCanId)
if self.__gardeningGui:
self.setWateringCanGuiLevel(wateringCanId)
def setGardenStarted(self, bStarted):
self.gardenStarted = bStarted
if self.gardenStarted and not self.gardenPage and hasattr(self, 'book'):
self.loadGardenPages()
def b_setAnimState(self, animName, animMultiplier = 1.0, callback = None, extraArgs = []):
if self.wantStatePrint:
print 'Local Toon Anim State %s' % animName
DistributedToon.DistributedToon.b_setAnimState(self, animName, animMultiplier, callback, extraArgs)
def swimTimeoutAction(self):
self.ignore('wakeup')
self.takeOffSuit()
base.cr.playGame.getPlace().fsm.request('final')
self.b_setAnimState('TeleportOut', 1, self.__handleSwimExitTeleport, [0])
return Task.done
def __handleSwimExitTeleport(self, requestStatus):
self.notify.info('closing shard...')
base.cr.gameFSM.request('closeShard', ['afkTimeout'])
def sbFriendAdd(self, id, info):
print 'sbFriendAdd'
def sbFriendUpdate(self, id, info):
print 'sbFriendUpdate'
def sbFriendRemove(self, id):
print 'sbFriendRemove'
def addGolfPage(self):
if self.hasPlayedGolf():
if hasattr(self, 'golfPage') and self.golfPage != None:
return
if not launcher.getPhaseComplete(6):
self.acceptOnce('phaseComplete-6', self.addGolfPage)
return
self.golfPage = GolfPage.GolfPage()
self.golfPage.setAvatar(self)
self.golfPage.load()
self.book.addPage(self.golfPage, pageName=TTLocalizer.GolfPageTitle)
return
def addEventsPage(self):
if hasattr(self, 'eventsPage') and self.eventsPage != None:
return
if not launcher.getPhaseComplete(4):
self.acceptOnce('phaseComplete-4', self.addEventsPage)
return
self.eventsPage = EventsPage.EventsPage()
self.eventsPage.load()
self.book.addPage(self.eventsPage, pageName=TTLocalizer.EventsPageName)
return
def addNewsPage(self):
self.newsPage = NewsPage.NewsPage()
self.newsPage.load()
self.book.addPage(self.newsPage, pageName=TTLocalizer.NewsPageName)
def addTIPPage(self):
self.tipPage = TIPPage.TIPPage()
self.tipPage.load()
self.book.addPage(self.tipPage, pageName=TTLocalizer.TIPPageTitle)
def setPinkSlips(self, pinkSlips):
DistributedToon.DistributedToon.setPinkSlips(self, pinkSlips)
self.inventory.updateTotalPropsText()
def getAccountDays(self):
days = 0
defaultDays = base.cr.config.GetInt('account-days', -1)
if defaultDays >= 0:
days = defaultDays
elif hasattr(base.cr, 'accountDays'):
days = base.cr.accountDays
return days
def hasActiveBoardingGroup(self):
if hasattr(localAvatar, 'boardingParty') and localAvatar.boardingParty:
return localAvatar.boardingParty.hasActiveGroup(localAvatar.doId)
else:
return False
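# zoneId is exposed as a property (below) so every assignment funnels through
# setZoneId, which traps the invalid -1 sentinel at assignment time.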
def getZoneId(self):
return self._zoneId
def setZoneId(self, value):
if value == -1:
self.notify.error('zoneId should not be set to -1, tell Redmond')
self._zoneId = value
zoneId = property(getZoneId, setZoneId)
def systemWarning(self, warningText = 'Acknowledge this system message.'):
self.createSystemMsgAckGui()
self.systemMsgAckGui['text'] = warningText
self.systemMsgAckGui.show()
def createSystemMsgAckGui(self):
if self.systemMsgAckGui == None or self.systemMsgAckGui.isEmpty():
message = 'o' * 100
self.systemMsgAckGui = TTDialog.TTGlobalDialog(doneEvent=self.systemMsgAckGuiDoneEvent, message=message, style=TTDialog.Acknowledge)
self.systemMsgAckGui.hide()
return
def hideSystemMsgAckGui(self):
if self.systemMsgAckGui != None and not self.systemMsgAckGui.isEmpty():
self.systemMsgAckGui.hide()
return
def setSleepAutoReply(self, fromId):
av = base.cr.identifyAvatar(fromId)
if isinstance(av, DistributedToon.DistributedToon):
base.localAvatar.setSystemMessage(0, TTLocalizer.sleep_auto_reply % av.getName(), WhisperPopup.WTToontownBoardingGroup)
elif av is not None:
self.notify.warning('setSleepAutoReply from non-toon %s' % fromId)
return
def setLastTimeReadNews(self, newTime):
self.lastTimeReadNews = newTime
def getLastTimeReadNews(self):
return self.lastTimeReadNews
def cheatCogdoMazeGame(self, kindOfCheat = 0):
if config.GetBool('allow-cogdo-maze-suit-hit-cheat'):
maze = base.cr.doFind('DistCogdoMazeGame')
if maze:
if kindOfCheat == 0:
for suitNum in maze.game.suitsById.keys():
suit = maze.game.suitsById[suitNum]
maze.sendUpdate('requestSuitHitByGag', [suit.type, suitNum])
elif kindOfCheat == 1:
for joke in maze.game.pickups:
maze.sendUpdate('requestPickUp', [joke.serialNum])
else:
self.sendUpdate('logSuspiciousEvent', ['cheatCogdoMazeGame'])
def isReadingNews(self):
result = False
if base.cr and base.cr.playGame and base.cr.playGame.getPlace() and hasattr(base.cr.playGame.getPlace(), 'fsm') and base.cr.playGame.getPlace().fsm:
fsm = base.cr.playGame.getPlace().fsm
curState = fsm.getCurrentState().getName()
if curState == 'stickerBook' and WantNewsPage:
if hasattr(self, 'newsPage'):
if self.book.isOnPage(self.newsPage):
result = True
return result
def isBookOpen(self):
result = False
if base.cr and base.cr.playGame and base.cr.playGame.getPlace() and hasattr(base.cr.playGame.getPlace(), 'fsm') and base.cr.playGame.getPlace().fsm:
fsm = base.cr.playGame.getPlace().fsm
curState = fsm.getCurrentState().getName()
if curState == 'stickerBook':
result = True
return result
def doTeleportResponse(self, fromAvatar, toAvatar, avId, available, shardId, hoodId, zoneId, sendToId):
localAvatar.d_teleportResponse(avId, available, shardId, hoodId, zoneId, sendToId)
def d_teleportResponse(self, avId, available, shardId, hoodId, zoneId, sendToId = None):
if config.GetBool('want-tptrack', False):
if available == 1:
self.notify.debug('sending teleportResponseToAI')
self.sendUpdate('teleportResponseToAI', [avId,
available,
shardId,
hoodId,
zoneId,
sendToId])
else:
self.sendUpdate('teleportResponse', [avId,
available,
shardId,
hoodId,
zoneId], sendToId)
else:
DistributedPlayer.DistributedPlayer.d_teleportResponse(self, avId, available, shardId, hoodId, zoneId, sendToId)
def startQuestMap(self):
if self.questMap:
self.questMap.start()
def stopQuestMap(self):
if self.questMap:
self.questMap.stop()
def getPetId(self):
return self.petId
def hasPet(self):
return self.petId != 0
def _startZombieCheck(self):
pass
def _stopZombieCheck(self):
pass
|
silly-wacky-3-town-toon/SOURCE-COD
|
toontown/toon/LocalToon.py
|
Python
|
apache-2.0
| 87,894
|
[
"VisIt"
] |
b58f4993d54e81b2b913d8c56493130e81578421fb2648981ce6725cc9c7697f
|
"""Options manager for :class:`~diofant.polys.polytools.Poly` and public API functions."""
import re
from ..core import Basic, I, sympify
from ..utilities import has_dups, numbered_symbols, topological_sort
from .polyerrors import FlagError, GeneratorsError, OptionError
__all__ = 'Options', 'Order'
class Option:
"""Base class for all kinds of options."""
option = None
is_Flag = False
requires = []
excludes = []
after = []
before = []
@classmethod
def default(cls):
return
@classmethod
def preprocess(cls, option):
return # pragma: no cover
@classmethod
def postprocess(cls, options):
return
class Flag(Option):
"""Base class for all kinds of flags."""
is_Flag = True
class BooleanOption(Option):
"""An option that must have a boolean value or equivalent assigned."""
@classmethod
def preprocess(cls, value):
if value in [True, False]:
return bool(value)
else:
raise OptionError(f"'{cls.option}' must have a boolean value assigned, got {value}")
class OptionType(type):
"""Base type for all options that does registers options."""
def __init__(cls, *args, **kwargs):
@property
def getter(a):
try:
return a[cls.option]
except KeyError:
return cls.default()
setattr(Options, cls.option, getter)
Options.__options__[cls.option] = cls
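# Declaring ``metaclass=OptionType`` on an option class therefore both
# installs a lazy getter property on Options and records the class in
# Options.__options__, keeping option dispatch table-driven.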
class Options(dict):
"""
Options manager for polynomial manipulation module.
Examples
========
>>> Options((x, y, z), {'domain': 'ZZ'})
{'auto': False, 'domain': ZZ, 'gens': (x, y, z)}
>>> build_options((x, y, z), {'domain': 'ZZ'})
{'auto': False, 'domain': ZZ, 'gens': (x, y, z)}
**Options**
* Expand --- boolean option
* Gens --- option
* Wrt --- option
* Sort --- option
* Order --- option
* Field --- boolean option
* Greedy --- boolean option
* Domain --- option
* Split --- boolean option
* Gaussian --- boolean option
* Extension --- option
* Modulus --- option
* Symmetric --- boolean option
* Strict --- boolean option
**Flags**
* Auto --- boolean flag
* Frac --- boolean flag
* Formal --- boolean flag
* Polys --- boolean flag
* Include --- boolean flag
* All --- boolean flag
* Gen --- flag
"""
__order__ = None
__options__ = {}
def __init__(self, gens, args, flags=None, strict=False):
dict.__init__(self)
if gens and args.get('gens', ()):
raise OptionError(
"both '*gens' and keyword argument 'gens' supplied")
elif gens:
args = dict(args)
args['gens'] = gens
defaults = args.pop('defaults', {})
def preprocess_options(args):
for option, value in args.items():
try:
cls = self.__options__[option]
except KeyError:
raise OptionError(f"'{option}' is not a valid option")
if issubclass(cls, Flag):
if strict and (flags is None or option not in flags):
raise OptionError(f"'{option}' flag is not allowed in this context")
if value is not None:
self[option] = cls.preprocess(value)
preprocess_options(args)
for key, value in dict(defaults).items():
if key in self:
del defaults[key]
else:
for option in self:
cls = self.__options__[option]
if key in cls.excludes:
del defaults[key]
break
preprocess_options(defaults)
for option in self:
cls = self.__options__[option]
for exclude_option in cls.excludes:
if self.get(exclude_option) is not None:
raise OptionError(f"'{option}' option is not allowed together with '{exclude_option}'")
for option in self.__order__:
self.__options__[option].postprocess(self)
@classmethod
def _init_dependencies_order(cls):
"""Resolve the order of options' processing."""
if cls.__order__ is None:
vertices, edges = [], set()
for name, option in cls.__options__.items():
vertices.append(name)
for _name in option.after:
edges.add((_name, name))
for _name in option.before:
edges.add((name, _name))
try:
cls.__order__ = topological_sort((vertices, list(edges)))
except ValueError:
raise RuntimeError('cycle detected in diofant.polys'
' options framework')
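# The after/before lists on each option define a dependency graph (e.g. Auto's
# after=['field', 'domain', 'extension', 'gaussian'] adds edges such as
# ('field', 'auto')); the topological sort fixes the postprocessing order.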
def clone(self, updates={}):
"""Clone ``self`` and update specified options."""
obj = dict.__new__(self.__class__)
for option, value in self.items():
obj[option] = value
for option, value in updates.items():
obj[option] = value
return obj
def __setattr__(self, attr, value):
if attr in self.__options__:
self[attr] = value
else:
super().__setattr__(attr, value)
@property
def args(self):
args = {}
for option, value in self.items():
if value is not None and option != 'gens':
cls = self.__options__[option]
if not issubclass(cls, Flag):
args[option] = value
return args
@property
def options(self):
options = {}
for option, cls in self.__options__.items():
if not issubclass(cls, Flag):
options[option] = getattr(self, option)
return options
@property
def flags(self):
flags = {}
for option, cls in self.__options__.items():
if issubclass(cls, Flag):
flags[option] = getattr(self, option)
return flags
class Expand(BooleanOption, metaclass=OptionType):
"""``expand`` option to polynomial manipulation functions."""
option = 'expand'
requires = []
excludes = []
@classmethod
def default(cls):
return True
class Gens(Option, metaclass=OptionType):
"""``gens`` option to polynomial manipulation functions."""
option = 'gens'
requires = []
excludes = []
@classmethod
def default(cls):
return ()
@classmethod
def preprocess(cls, gens):
if isinstance(gens, Basic):
gens = gens,
elif len(gens) == 1 and hasattr(gens[0], '__iter__'):
gens = gens[0]
if gens == (None,):
gens = ()
elif has_dups(gens):
raise GeneratorsError(f'duplicated generators: {gens}')
elif any(gen.is_commutative is False for gen in gens):
raise GeneratorsError(f'non-commutative generators: {gens}')
return tuple(gens)
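# Accepted forms: a single expression, a single iterable of generators, or
# (None,) for 'no generators'; duplicated or non-commutative generators are
# rejected with GeneratorsError.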
class Wrt(Option, metaclass=OptionType):
"""``wrt`` option to polynomial manipulation functions."""
option = 'wrt'
requires = []
excludes = []
_re_split = re.compile(r'\s*,\s*|\s+')
@classmethod
def preprocess(cls, wrt):
if isinstance(wrt, Basic):
return [str(wrt)]
elif isinstance(wrt, str):
wrt = wrt.strip()
if wrt.endswith(','):
raise OptionError('Bad input: missing parameter.')
if not wrt:
return []
return list(cls._re_split.split(wrt))
elif hasattr(wrt, '__getitem__'):
return list(map(str, wrt))
else:
raise OptionError("invalid argument for 'wrt' option")
class Sort(Option, metaclass=OptionType):
"""``sort`` option to polynomial manipulation functions."""
option = 'sort'
requires = []
excludes = []
@classmethod
def default(cls):
return []
@classmethod
def preprocess(cls, sort):
if isinstance(sort, str):
return [gen.strip() for gen in sort.split('>')]
elif hasattr(sort, '__getitem__'):
return list(map(str, sort))
else:
raise OptionError("invalid argument for 'sort' option")
class Order(Option, metaclass=OptionType):
"""``order`` option to polynomial manipulation functions."""
option = 'order'
requires = []
excludes = []
@classmethod
def default(cls):
from .orderings import lex
return lex
@classmethod
def preprocess(cls, order):
from .orderings import monomial_key
return monomial_key(order)
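# 'order' accepts either a ready-made monomial order or a name such as
# 'lex', 'grlex' or 'grevlex', resolved via monomial_key.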
class Field(BooleanOption, metaclass=OptionType):
"""``field`` option to polynomial manipulation functions."""
option = 'field'
requires = []
excludes = ['domain', 'split', 'gaussian']
class Greedy(BooleanOption, metaclass=OptionType):
"""``greedy`` option to polynomial manipulation functions."""
option = 'greedy'
requires = []
excludes = ['domain', 'split', 'gaussian', 'extension', 'modulus']
class Composite(BooleanOption, metaclass=OptionType):
"""``composite`` option to polynomial manipulation functions."""
option = 'composite'
@classmethod
def default(cls):
return
requires = []
excludes = ['domain', 'split', 'gaussian', 'modulus']
class Domain(Option, metaclass=OptionType):
"""``domain`` option to polynomial manipulation functions."""
option = 'domain'
requires = []
excludes = ['field', 'greedy', 'split', 'gaussian', 'extension']
after = ['gens']
_re_realfield = re.compile(r'^(R|RR)(_(\d+))?$')
_re_complexfield = re.compile(r'^(C|CC)(_(\d+))?$')
_re_finitefield = re.compile(r'^(FF|GF)\((\d+)\)$')
_re_polynomial = re.compile(r'^(Z|ZZ|Q|QQ)\[(.+)\]$')
_re_fraction = re.compile(r'^(Z|ZZ|Q|QQ)\((.+)\)$')
_re_algebraic = re.compile(r'^(Q|QQ)\<(.+)\>$')
@classmethod
def preprocess(cls, domain):
from .. import domains
if isinstance(domain, domains.Domain):
return domain
elif isinstance(domain, str):
if domain in ['Z', 'ZZ']:
return domains.ZZ
if domain in ['Q', 'QQ']:
return domains.QQ
if domain == 'EX':
return domains.EX
r = cls._re_realfield.match(domain)
if r is not None:
_, _, prec = r.groups()
if prec is None:
return domains.RR
else:
return domains.RealField(int(prec))
r = cls._re_complexfield.match(domain)
if r is not None:
_, _, prec = r.groups()
if prec is None:
return domains.CC
else:
return domains.ComplexField(int(prec))
r = cls._re_finitefield.match(domain)
if r is not None:
return domains.FF(int(r.groups()[1]))
r = cls._re_polynomial.match(domain)
if r is not None:
ground, gens = r.groups()
gens = list(map(sympify, gens.split(',')))
if ground in ['Z', 'ZZ']:
return domains.ZZ.inject(*gens)
else:
return domains.QQ.inject(*gens)
r = cls._re_fraction.match(domain)
if r is not None:
ground, gens = r.groups()
gens = list(map(sympify, gens.split(',')))
if ground in ['Z', 'ZZ']:
return domains.ZZ.inject(*gens).field
else:
return domains.QQ.inject(*gens).field
r = cls._re_algebraic.match(domain)
if r is not None:
gens = list(map(sympify, r.groups()[1].split(',')))
return domains.QQ.algebraic_field(*gens)
raise OptionError(f'expected a valid domain specification, got {domain}')
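# String specs matched above: 'Z'/'ZZ', 'Q'/'QQ', 'EX', real/complex fields
# with optional precision ('RR', 'RR_53', 'CC_53'), finite fields ('FF(7)',
# 'GF(7)'), polynomial rings ('ZZ[x,y]'), fraction fields ('QQ(x)') and
# algebraic fields ('QQ<sqrt(2)>').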
@classmethod
def postprocess(cls, options):
from .. import domains
if 'gens' in options and 'domain' in options and options['domain'].is_Composite and \
(set(options['domain'].symbols) & set(options['gens'])):
raise GeneratorsError('ground domain and generators '
'interfere together')
elif ('gens' not in options or not options['gens']) and \
'domain' in options and options['domain'] == domains.EX:
raise GeneratorsError('you have to provide generators because'
' EX domain was requested')
class Split(BooleanOption, metaclass=OptionType):
"""``split`` option to polynomial manipulation functions."""
option = 'split'
requires = []
excludes = ['field', 'greedy', 'domain', 'gaussian', 'extension', 'modulus']
@classmethod
def postprocess(cls, options):
if 'split' in options:
raise NotImplementedError("'split' option is not implemented yet")
class Gaussian(BooleanOption, metaclass=OptionType):
"""``gaussian`` option to polynomial manipulation functions."""
option = 'gaussian'
requires = []
excludes = ['field', 'greedy', 'domain', 'split', 'extension', 'modulus']
@classmethod
def postprocess(cls, options):
if 'gaussian' in options and options['gaussian'] is True:
options['extension'] = {I}
Extension.postprocess(options)
class Extension(Option, metaclass=OptionType):
"""``extension`` option to polynomial manipulation functions."""
option = 'extension'
requires = []
excludes = ['greedy', 'domain', 'split', 'gaussian', 'modulus']
@classmethod
def preprocess(cls, extension):
if extension == 1:
return bool(extension)
elif extension == 0:
return bool(extension)
else:
if not hasattr(extension, '__iter__'):
extension = {extension}
else:
if not extension:
extension = None
else:
extension = set(extension)
return extension
@classmethod
def postprocess(cls, options):
from .. import domains
if 'extension' in options and options['extension'] not in (True, False):
options['domain'] = domains.QQ.algebraic_field(
*options['extension'])
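# Behaviour sketch (assumed from the code above): scalars are wrapped in a
# set, iterables become sets, and booleans pass through unchanged:
#   Extension.preprocess(sqrt(2))      -> {sqrt(2)}
#   Extension.preprocess([sqrt(2), I]) -> {sqrt(2), I}
# A non-boolean extension then makes postprocess switch the ground domain to
# domains.QQ.algebraic_field(*extension).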
class Modulus(Option, metaclass=OptionType):
"""``modulus`` option to polynomial manipulation functions."""
option = 'modulus'
requires = []
excludes = ['greedy', 'split', 'domain', 'gaussian', 'extension']
@classmethod
def preprocess(cls, modulus):
modulus = sympify(modulus)
if modulus.is_Integer and modulus > 0:
return int(modulus)
else:
raise OptionError(
f"'modulus' must a positive integer, got {modulus}")
@classmethod
def postprocess(cls, options):
from .. import domains
if 'modulus' in options:
modulus = options['modulus']
options['domain'] = domains.FF(modulus)
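# Sketch (assumed): Modulus.preprocess(7) -> 7, after which postprocess
# replaces the ground domain with the finite field domains.FF(7), so the
# polynomial is built over GF(7).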
class Strict(BooleanOption, metaclass=OptionType):
"""``strict`` option to polynomial manipulation functions."""
option = 'strict'
@classmethod
def default(cls):
return True
class Auto(BooleanOption, Flag, metaclass=OptionType):
"""``auto`` flag to polynomial manipulation functions."""
option = 'auto'
after = ['field', 'domain', 'extension', 'gaussian']
@classmethod
def default(cls):
return True
@classmethod
def postprocess(cls, options):
if ('domain' in options or 'field' in options) and 'auto' not in options:
options['auto'] = False
class Frac(BooleanOption, Flag, metaclass=OptionType):
"""``frac`` option to polynomial manipulation functions."""
option = 'frac'
@classmethod
def default(cls):
return False
class Formal(BooleanOption, Flag, metaclass=OptionType):
"""``formal`` flag to polynomial manipulation functions."""
option = 'formal'
@classmethod
def default(cls):
return False
class Polys(BooleanOption, Flag, metaclass=OptionType):
"""``polys`` flag to polynomial manipulation functions."""
option = 'polys'
class Include(BooleanOption, Flag, metaclass=OptionType):
"""``include`` flag to polynomial manipulation functions."""
option = 'include'
@classmethod
def default(cls):
return False
class All(BooleanOption, Flag, metaclass=OptionType):
"""``all`` flag to polynomial manipulation functions."""
option = 'all'
@classmethod
def default(cls):
return False
class Gen(Flag, metaclass=OptionType):
"""``gen`` flag to polynomial manipulation functions."""
option = 'gen'
@classmethod
def default(cls):
return 0
@classmethod
def preprocess(cls, gen):
if isinstance(gen, (Basic, int)):
return gen
else:
raise OptionError("invalid argument for 'gen' option")
class Symbols(Flag, metaclass=OptionType):
"""``symbols`` flag to polynomial manipulation functions."""
option = 'symbols'
@classmethod
def default(cls):
return numbered_symbols('s', start=1)
@classmethod
def preprocess(cls, symbols):
if hasattr(symbols, '__iter__'):
return iter(symbols)
else:
raise OptionError('expected an iterator or '
f'iterable container, got {symbols}')
class Method(Flag, metaclass=OptionType):
"""``method`` flag to polynomial manipulation functions."""
option = 'method'
@classmethod
def preprocess(cls, method):
if isinstance(method, str):
return method.lower()
else:
raise OptionError(f'expected a string, got {method}')
def build_options(gens, args=None):
"""Construct options from keyword arguments or ... options."""
if args is None:
gens, args = (), gens
if len(args) != 1 or 'opt' not in args or gens:
return Options(gens, args)
else:
return args['opt']
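# Minimal usage sketch (assumed; names are illustrative):
#   build_options((x, y), {'domain': 'ZZ'})  # gens plus keyword options
#   build_options({'opt': opt})              # pass an existing Options through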
def allowed_flags(args, flags):
"""
Allow specified flags to be used in the given context.
Examples
========
>>> allowed_flags({'domain': ZZ}, [])
>>> allowed_flags({'domain': ZZ, 'frac': True}, [])
Traceback (most recent call last):
...
FlagError: 'frac' flag is not allowed in this context
>>> allowed_flags({'domain': ZZ, 'frac': True}, ['frac'])
"""
flags = set(flags)
for arg in args:
try:
if Options.__options__[arg].is_Flag and arg not in flags:
raise FlagError(
f"'{arg}' flag is not allowed in this context")
except KeyError:
raise OptionError(f"'{arg}' is not a valid option")
def set_defaults(options, **defaults):
"""Update options with default values."""
if 'defaults' not in options:
options = dict(options)
options['defaults'] = defaults
return options
Options._init_dependencies_order()
|
skirpichev/omg
|
diofant/polys/polyoptions.py
|
Python
|
bsd-3-clause
| 19,400
|
[
"Gaussian"
] |
d95f2cfbc7b92b3ae8ed2bc4edd01a28db103c87bb26e94cb8179ef7627727f5
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
'''
This module contains the one-parameter exponential families used
for fitting GLMs and GAMs.
These families are described in
P. McCullagh and J. A. Nelder. "Generalized linear models."
Monographs on Statistics and Applied Probability.
Chapman & Hall, London, 1983.
'''
from .family import (Gaussian, Family, Poisson, Gamma, InverseGaussian,
Binomial)
|
bthirion/nipy
|
nipy/algorithms/statistics/models/family/__init__.py
|
Python
|
bsd-3-clause
| 503
|
[
"Gaussian"
] |
a2ec2ee37b68095809e233b663a52eec668817820e23f068b73fecad64cd9f1b
|
"""=================================================
pipeline_testing - automated testing of pipelines
=================================================
This pipeline executes other pipelines for testing purposes.
Overview
========
This pipeline implements automated testing of CGAT pipelines. The
pipeline downloads test data from a specified URL, runs the associated
pipeline for each data set and compares the output with a reference.
The results are collected in a report.
Tests are setup in the pipeline configuration file.
Usage
=====
See :ref:`PipelineSettingUp` and :ref:`PipelineRunning` for general
information on how to use CGAT pipelines.
In order to run all tests, simply enter an empty directory and type::
python <srcdir>/pipeline_testing.py config
Edit the config files as required and then type::
python <srcdir>/pipeline_testing.py make full
python <srcdir>/pipeline_testing.py make build_report
The first command will download the data and run the pipelines while
the second will build a summary report.
Configuration
-------------
The pipeline requires a configured :file:`pipeline.ini` file.
Tests are described as sections in the configuration file. A test
section starts with the prefix ``test_``. The following example is a
complete test setup::
[test_mytest1]
# pipeline to run
pipeline=pipeline_mapping
# pipeline target to run (default is 'full')
# multiple targets can be specified as a comma separated list.
target=full
# filename suffixes to checksum
regex_md5=gtf.gz,bed.gz,tsv.gz,bam,nreads
# regular expression of files to be excluded from
# test for difference. Use | to separate multiple
# regular expressions.
regex_exist=rates.gff.gz
This configuration will run the test ``mytest1``. The associated
pipeline is :doc:`pipeline_mapping` and it will execute the target
``make full``. To check if the pipeline has completed successfully, it
will compare all files ending with any of the suffixes specified
(``gtf.gz``, ``bed.gz``, etc). The comparison will be done by building
a checksum of the whole file ignoring any comments (lines starting
with a ``#``).
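As an illustration only (this is not the pipeline's actual implementation),
such a comment-insensitive checksum could be computed along these lines::

    import hashlib

    def checksum_ignoring_comments(path):
        md5 = hashlib.md5()
        with open(path) as inf:
            for line in inf:
                if not line.startswith("#"):
                    md5.update(line)
        return md5.hexdigest()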
Some files will be different at every run, for example if they use
some form of random initialization. Thus, the exact test can be
relaxed for groups of files. Files matching the regular expression in
``regex_linecount`` will be tested for existence and for an identical
number of lines. Files matching the regular expressions in
``regex_exist`` will only be tested for existence.
The test expects a file called :file:`test_mytest1.tgz` with the
test data at the download URL (parameter ``data_url``).
To define a default test for a pipeline, simply name the
test ``test_<pipeline name>``, for example::
[test_mapping]
regex_md5=gtf.gz,bed.gz,tsv.gz,bam,nreads
Note that setting the ``target`` and ``pipeline`` options is
not necessary in this case as the default values suffice.
Input data
----------
The input data for each test resides in a compressed tar-ball. The input
data should uncompress in a directory called :file:`<testname>.dir`
The tar-ball must also contain a file :file:`<testname>.ref`
containing the md5 checksums of files of a previous run of the test
that is being used as a reference.
The input data should contain all the data that is required for
running a test within a directory. It is best to minimize dependencies
between tests, though there is a mechanism for this (see below).
For example, the contents of a tar-ball will look like this::
test_mytest1.dir/ # test data root
test_mytest1.dir/Brain-F2-R1.fastq.gz # test data
test_mytest1.dir/Brain-F1-R1.fastq.gz
test_mytest1.dir/hg19.fasta # genomic data
test_mytest1.dir/hg19.idx
test_mytest1.dir/hg19.fa
test_mytest1.dir/hg19.fa.fai
test_mytest1.dir/pipeline.ini # pipeline configuration file
test_mytest1.dir/indices/ # configured to work in test dir
test_mytest1.dir/indices/bwa/ # bwa indices
test_mytest1.dir/indices/bwa/hg19.bwt
test_mytest1.dir/indices/bwa/hg19.ann
test_mytest1.dir/indices/bwa/hg19.pac
test_mytest1.dir/indices/bwa/hg19.sa
test_mytest1.dir/indices/bwa/hg19.amb
test_mytest1.ref # reference file
The reference file looks like this::
test_mytest1.dir/bwa.dir/Brain-F2-R2.bwa.bam 0e1c4ee88f0249c21e16d93ac496eddf
test_mytest1.dir/bwa.dir/Brain-F1-R2.bwa.bam 01bee8af5bbb5b1d13ed82ef1bc3620d
test_mytest1.dir/bwa.dir/Brain-F2-R1.bwa.bam 80902c87519b6865a9ca982787280972
test_mytest1.dir/bwa.dir/Brain-F1-R1.bwa.bam 503c99ab7042a839e56147fb1a221f27
...
This file is created by the test pipeline and called
:file:`test_mytest1.md5`. When setting up a test, start with an empty
file and later add this file to the test data.
Pipeline dependencies
---------------------
Some pipelines depend on the output of other pipelines; the most notable
is :doc:`pipeline_annotations`. To run a set of pipelines before other
pipelines name them in the option ``prerequisites``, for example::
prerequisites=prereq_annotations
Pipeline output
===============
The major output is in the database file :file:`csvdb`.
Code
====
"""
from ruffus import files, transform, suffix, follows, merge, collate, regex, mkdir
import sys
import pipes
import os
import re
import glob
import tarfile
import pandas
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
###################################################
###################################################
###################################################
# Pipeline configuration
###################################################
# load options from the config file
import CGATPipelines.Pipeline as P
P.getParameters(
["%s/pipeline.ini" % os.path.splitext(__file__)[0],
"../pipeline.ini",
"pipeline.ini"])
PARAMS = P.PARAMS
# obtain prerequisite generic data
@files([(None, "%s.tgz" % x)
for x in P.asList(PARAMS.get("prerequisites", ""))])
def setupPrerequisites(infile, outfile):
'''setup pre-requisites.
These are tar-balls that are unpacked, but not run.
'''
to_cluster = False
track = P.snip(outfile, ".tgz")
# obtain data - should overwrite pipeline.ini file
statement = '''
wget --no-check-certificate -O %(track)s.tgz %(data_url)s/%(track)s.tgz'''
P.run()
tf = tarfile.open(outfile)
tf.extractall()
@files([(None, "%s.tgz" % x)
for x in P.CONFIG.sections()
if x.startswith("test")])
def setupTests(infile, outfile):
'''setup tests.
This method creates a directory in which a test will be run
and downloads test data with configuration files.
'''
to_cluster = False
track = P.snip(outfile, ".tgz")
if os.path.exists(track + ".dir"):
raise OSError('directory %s.dir already exists, please delete' % track)
# create directory
os.mkdir(track + ".dir")
# run pipeline config
pipeline_name = PARAMS.get(
"%s_pipeline" % track,
"pipeline_" + track[len("test_"):])
statement = '''
(cd %(track)s.dir;
python %(pipelinedir)s/%(pipeline_name)s.py
%(pipeline_options)s config) >& %(outfile)s.log
'''
P.run()
# obtain data - should overwrite pipeline.ini file
statement = '''
wget --no-check-certificate -O %(track)s.tgz %(data_url)s/%(track)s.tgz'''
P.run()
tf = tarfile.open(outfile)
tf.extractall()
if not os.path.exists("%s.dir" % track):
raise ValueError(
"test package did not create directory '%s.dir'" % track)
def runTest(infile, outfile):
'''run a test.
Multiple targets are run iteratively.
'''
track = P.snip(outfile, ".log")
pipeline_name = PARAMS.get(
"%s_pipeline" % track,
"pipeline_" + track[len("test_"):])
pipeline_targets = P.asList(
PARAMS.get("%s_target" % track,
"full"))
# do not run on cluster, mirror
# that a pipeline is started from
# the head node
to_cluster = False
template_statement = '''
(cd %%(track)s.dir;
python %%(pipelinedir)s/%%(pipeline_name)s.py
%%(pipeline_options)s make %s) >& %%(outfile)s
'''
if len(pipeline_targets) == 1:
statement = template_statement % pipeline_targets[0]
P.run(ignore_errors=True)
else:
statements = []
for pipeline_target in pipeline_targets:
statements.append(template_statement % pipeline_target)
P.run(ignore_errors=True)
# @follows(setupTests)
# @files([("%s.tgz" % x, "%s.log" % x)
# for x in P.asList(PARAMS.get("prerequisites", ""))])
# def runPreparationTests(infile, outfile):
# '''run pre-requisite pipelines.'''
# runTest(infile, outfile)
@follows(setupTests, setupPrerequisites)
@files([("%s.tgz" % x, "%s.log" % x)
for x in P.CONFIG.sections()
if x.startswith("test") and
x not in P.asList(PARAMS.get("prerequisites", ""))])
def runTests(infile, outfile):
'''run a pipeline with test data.'''
runTest(infile, outfile)
@transform(runTests,
suffix(".log"),
".report")
def runReports(infile, outfile):
'''run a pipeline report.'''
track = P.snip(outfile, ".report")
pipeline_name = PARAMS.get(
"%s_pipeline" % track,
"pipeline_" + track[len("test_"):])
statement = '''
(cd %(track)s.dir; python %(pipelinedir)s/%(pipeline_name)s.py
%(pipeline_options)s make build_report) >& %(outfile)s
'''
P.run(ignore_errors=True)
def compute_file_metrics(infile, outfile, metric, suffixes):
"""apply a tool to compute metrics on a list of files matching
regex_pattern."""
if suffixes is None or len(suffixes) == 0:
E.info("No metrics computed for {}".format(outfile))
IOTools.touchFile(outfile)
return
track = P.snip(infile, ".log")
# convert regex patterns to a suffix match:
# prepend a .*
# append a $
regex_pattern = " -or ".join(["-regex .*{}$".format(pipes.quote(x))
for x in suffixes])
E.debug("applying metric {} to files matching {}".format(metric,
regex_pattern))
if metric == "file":
statement = '''find %(track)s.dir
-type f
-not -regex '.*\/report.*'
-not -regex '.*\/_.*'
\( %(regex_pattern)s \)
| sort -k1,1
> %(outfile)s'''
else:
statement = '''find %(track)s.dir
-type f
-not -regex '.*\/report.*'
-not -regex '.*\/_.*'
\( %(regex_pattern)s \)
-exec %(pipeline_scriptsdir)s/cgat_file_apply.sh {} %(metric)s \;
| perl -p -e "s/ +/\\t/g"
| sort -k1,1
> %(outfile)s'''
P.run()
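# Illustrative expansion (assumed values): suffixes=["bam", "tsv.gz"] gives
#   regex_pattern = "-regex .*bam$ -or -regex .*tsv.gz$"
# which the find commands above combine with the -not report/underscore
# exclusions before piping into sort.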
@follows(runReports)
@transform(runTests,
suffix(".log"),
".md5")
def buildCheckSums(infile, outfile):
'''build checksums for files in the build directory.
Files are uncompressed before computing the checksum
as gzip stores meta information such as the time stamp.
'''
track = P.snip(infile, ".log")
compute_file_metrics(
infile,
outfile,
metric="md5sum",
suffixes=P.asList(PARAMS.get('%s_regex_md5' % track, "")))
@transform(runTests,
suffix(".log"),
".lines")
def buildLineCounts(infile, outfile):
'''compute line counts.
Files are uncompressed before computing the number of lines.
'''
track = P.snip(infile, ".log")
compute_file_metrics(
infile,
outfile,
metric="wc -l",
suffixes=P.asList(PARAMS.get('%s_regex_linecount' % track, "")))
@transform(runTests,
suffix(".log"),
".exist")
def checkFileExistence(infile, outfile):
'''check whether file exists.
Files are uncompressed before checking existence.
'''
track = P.snip(infile, ".log")
compute_file_metrics(
infile,
outfile,
metric="file",
suffixes=P.asList(PARAMS.get('%s_regex_exist' % track, "")))
@collate((buildCheckSums, buildLineCounts, checkFileExistence),
regex("([^.]*).(.*)"),
r"\1.stats")
def mergeFileStatistics(infiles, outfile):
'''merge all file statistics.'''
to_cluster = False
infiles = " ".join(sorted(infiles))
statement = '''
%(pipeline_scriptsdir)s/merge_testing_output.sh
%(infiles)s
> %(outfile)s'''
P.run()
@merge(mergeFileStatistics,
"md5_compare.tsv")
def compareCheckSums(infiles, outfile):
'''compare checksum files against existing reference data.
'''
to_cluster = False
outf = IOTools.openFile(outfile, "w")
outf.write("\t".join((
("track", "status",
"job_finished",
"nfiles", "nref",
"missing", "extra",
"different",
"different_md5",
"different_lines",
"same",
"same_md5",
"same_lines",
"same_exist",
"files_missing",
"files_extra",
"files_different_md5",
"files_different_lines"))) + "\n")
for infile in infiles:
E.info("working on {}".format(infile))
track = P.snip(infile, ".stats")
logfiles = glob.glob(track + "*.log")
job_finished = True
for logfile in logfiles:
is_complete = IOTools.isComplete(logfile)
E.debug("logcheck: {} = {}".format(logfile, is_complete))
job_finished = job_finished and is_complete
reffile = track + ".ref"
# regular expression of files to test only for existence
regex_exist = PARAMS.get('%s_regex_exist' % track, None)
if regex_exist:
regex_exist = re.compile("|".join(P.asList(regex_exist)))
regex_linecount = PARAMS.get('%s_regex_linecount' % track, None)
if regex_linecount:
regex_linecount = re.compile("|".join(P.asList(regex_linecount)))
regex_md5 = PARAMS.get('%s_regex_md5' % track, None)
if regex_md5:
regex_md5 = re.compile("|".join(P.asList(regex_md5)))
if not os.path.exists(reffile):
raise ValueError('no reference data defined for %s' % track)
cmp_data = pandas.read_csv(IOTools.openFile(infile),
sep="\t",
index_col=0)
ref_data = pandas.read_csv(IOTools.openFile(reffile),
sep="\t",
index_col=0)
shared_files = set(cmp_data.index).intersection(ref_data.index)
missing = set(ref_data.index).difference(cmp_data.index)
extra = set(cmp_data.index).difference(ref_data.index)
different = set(shared_files)
# remove those for which only check for existence
if regex_exist:
same_exist = set([x for x in different
if regex_exist.search(x)])
different = set([x for x in different
if not regex_exist.search(x)])
else:
same_exist = set()
# select those for which only check for number of lines
if regex_linecount:
check_lines = [x for x in different
if regex_linecount.search(x)]
dd = (cmp_data['nlines'][check_lines] !=
ref_data['nlines'][check_lines])
different_lines = set(dd.index[dd])
different = different.difference(check_lines)
dd = (cmp_data['nlines'][check_lines] ==
ref_data['nlines'][check_lines])
same_lines = set(dd.index[dd])
else:
different_lines = set()
same_lines = set()
# remainder - check md5
if regex_md5:
check_md5 = [x for x in different
if regex_md5.search(x)]
dd = (cmp_data['md5'][check_md5] !=
ref_data['md5'][check_md5])
different_md5 = set(dd.index[dd])
dd = (cmp_data['md5'][check_md5] ==
ref_data['md5'][check_md5])
same_md5 = set(dd.index[dd])
else:
different_md5 = set()
same_md5 = set()
if job_finished and (len(missing) + len(extra) + \
len(different_md5) + len(different_lines) == 0):
status = "OK"
else:
status = "FAIL"
outf.write("\t".join(map(str, (
track,
status,
job_finished,
len(cmp_data),
len(ref_data),
len(missing),
len(extra),
len(different_md5) + len(different_lines),
len(different_md5),
len(different_lines),
len(same_md5) + len(same_lines) + len(same_exist),
len(same_md5),
len(same_lines),
len(same_exist),
",".join(missing),
",".join(extra),
",".join(different_md5),
",".join(different_lines),
))) + "\n")
outf.close()
@transform(compareCheckSums,
suffix(".tsv"),
".load")
def loadComparison(infile, outfile):
'''load comparison data into database.'''
P.load(infile, outfile)
@transform(mergeFileStatistics,
suffix(".stats"),
"_results.load")
def loadResults(infile, outfile):
'''load comparison data into database.'''
P.load(infile, outfile, options="--add-index=file")
@transform(mergeFileStatistics,
suffix(".ref"),
"_reference.load")
def loadReference(infile, outfile):
'''load comparison data into database.'''
P.load(infile, outfile, options="--add-index=file")
@follows(runTests, runReports)
def run_components():
pass
@follows(run_components, loadComparison, loadResults, loadReference)
def full():
pass
@files(None, 'reset.log')
def reset(infile, outfile):
'''remove all data in pipeline.'''
to_cluster = False
statement = '''
rm -rf prereq_* ctmp*;
rm -rf test_* _cache _static _templates _tmp report;
rm -f *.log csvdb *.load *.tsv'''
P.run()
###################################################################
###################################################################
###################################################################
# primary targets
###################################################################
@follows(mkdir("report"))
def build_report():
'''build report from scratch.'''
E.info("starting report build process from scratch")
P.run_report(clean=True)
@follows(mkdir("report"))
def update_report():
'''update report.'''
E.info("updating report")
P.run_report(clean=False)
@follows(update_report)
def publish_report():
'''publish report.'''
E.info("publishing report")
P.publish_report()
if __name__ == "__main__":
sys.exit(P.main(sys.argv))
|
AntonioJBT/CGATPipeline_core
|
CGATPipelines/pipeline_testing.py
|
Python
|
mit
| 19,055
|
[
"BWA"
] |
b5ed18fe94bd640f28da8412a22e5bf2a11610099f1cbbaac14d5c6d15850c16
|
#!/usr/bin/env python
"""
Created on Thu Dec 19 14:31:36 2013
Author: Oren Freifeld
Email: freifeld@csail.mit.edu
"""
import numpy as np
from pylab import plt
from scipy import sparse
from of.utils import *
from cpab.cpaNd import CpaSpace as CpaSpaceNd
from cpab.cpaNd.utils import null
from cpab.cpa2d.utils import *
from cpab.cpa2d.ConfigPlt import ConfigPlt
from cpab.cpa2d.Tessellation import Tessellation
class CpaSpace(CpaSpaceNd):
dim_domain=2
dim_range=2
nHomoCoo = dim_domain+1
lengthAvee = dim_domain * nHomoCoo
Ashape = dim_domain,nHomoCoo
def __init__(self,XMINS,XMAXS,nCs,
zero_v_across_bdry,
vol_preserve,warp_around=[False]*2,
conformal=False,
zero_vals=[],cpa_calcs=None,
tess=['II','I'][0],
valid_outside=None,
only_local=False,
cont_constraints_are_separable=None):
if cont_constraints_are_separable is None:
raise ObsoleteError("""
Expected True/False value for cont_constraints_are_separable;
got None instead""")
if tess == 'II' and valid_outside is not None:
print "tess='II' --> ignoring the value of valid_outside"
if tess == 'I':
if valid_outside is None:
raise ValueError("tess='I' so you must pass valid_outside=True/False" )
self.valid_outside=valid_outside
nCx,nCy=map(int,nCs)
debug_cont_constraints_are_separable=False
if cont_constraints_are_separable:
print 'Check if can actually use separable continuity:'
if any(zero_v_across_bdry):
cont_constraints_are_separable=False
print 'any(zero_v_across_bdry) is True'
if vol_preserve:
cont_constraints_are_separable=False
print 'vol_preserve is True'
if nCx!=nCy:
cont_constraints_are_separable=False
print 'nCx!=nCy'
if XMINS[0]!=XMINS[1]:
cont_constraints_are_separable=False
print 'XMINS[0]!=XMINS[1]'
if XMAXS[0]!=XMAXS[1]:
cont_constraints_are_separable=False
print 'XMAXS[0]!=XMAXS[1]'
if not cont_constraints_are_separable:
debug_cont_constraints_are_separable=False
print 'so I could not use separable continuity.'
else:
print '\nWill use separable continuity.\n'
super(CpaSpace,self).__init__(XMINS,XMAXS,nCs,
zero_v_across_bdry,
vol_preserve=vol_preserve,
warp_around=warp_around,
conformal=conformal,
zero_vals=zero_vals,
cpa_calcs=cpa_calcs,tess=tess,
valid_outside=valid_outside,
only_local=only_local,
cont_constraints_are_separable=cont_constraints_are_separable)
tessellation = Tessellation(nCx,nCy,self.nC,self.XMINS,self.XMAXS,tess=tess)
self.tessellation=tessellation
try:
# raise FileDoesNotExistError("fake file")
subspace=Pkl.load(self.filename_subspace,verbose=1)
B=subspace['B']
nConstraints=subspace['nConstraints']
nEdges=subspace['nEdges']
constraintMat=subspace['constraintMat']
try:
cont_constraints_are_separable=subspace['cont_constraints_are_separable']
except KeyError:
cont_constraints_are_separable=False
except FileDoesNotExistError:
nC = self.nC
verts1,verts2,H,nEdges,nConstraints = self.tessellation.create_verts_and_H(
dim_range=self.dim_range,valid_outside=valid_outside)
if cont_constraints_are_separable == False or debug_cont_constraints_are_separable:
L = create_cont_constraint_mat(H,verts1,verts2,nEdges,nConstraints,nC,
dim_domain=self.dim_domain,
dim_range=self.dim_range)
if cont_constraints_are_separable:
Lx = create_cont_constraint_mat_separable(H,verts1,verts2,nEdges,nConstraints,
nC,dim_domain=self.dim_domain,
dim_range=self.dim_range,tess=tess)
if len(zero_vals):
Lzerovals = create_constraint_mat_zerovals(nC,dim_domain=self.dim_domain,
dim_range=self.dim_range,
zero_vals=zero_vals)
L = np.vstack([L,Lzerovals])
nConstraints += Lzerovals.shape[0]
if any(zero_v_across_bdry):
# Lbdry = self.tessellation.create_constraint_mat_bdry(
# zero_v_across_bdry=self.zero_v_across_bdry)
#
# L = np.vstack([L,Lbdry])
if cont_constraints_are_separable == False or debug_cont_constraints_are_separable:
Lbdry = self.tessellation.create_constraint_mat_bdry(
zero_v_across_bdry=self.zero_v_across_bdry)
L = np.vstack([L,Lbdry])
if cont_constraints_are_separable:
Lb = self.tessellation.create_constraint_mat_bdry_separable(
zero_v_across_bdry=self.zero_v_across_bdry)
raise NotImplementedError(zero_v_across_bdry, cont_constraints_are_separable)
nConstraints += Lbdry.shape[0]
if self.warp_around[0] or self.warp_around[1]:
raise NotImplementedError
Lwa = create_constraint_mat_warp_around(cells_verts,
nC,dim_domain=self.dim_domain)
L = np.vstack([L,Lwa])
nConstraints += Lwa.shape[0]
if vol_preserve:
Lvol = create_constraint_mat_preserve_vol(nC,dim_domain=self.dim_domain)
L = np.vstack([L,Lvol])
nConstraints += Lvol.shape[0]
if conformal:
Lconf = create_constraint_mat_conformal(nC,dim_domain=self.dim_domain,dim_range=self.dim_range)
L = np.vstack([L,Lconf])
nConstraints += Lconf.shape[0]
if self.only_local==False:
if not cont_constraints_are_separable:
B=null(L)
else: # to solve a much smaller SVD and to get a sparser basis
if vol_preserve or any(zero_v_across_bdry):
raise NotImplementedError
B1=null(Lx)
# B1.shape is (nC*nHomoCoo)x dim_null_space
if debug_cont_constraints_are_separable:
B=null(L)
if B1.shape[0]!=B.shape[0]/2:
raise ValueError(B1.shape,B.shape)
if float(B1.shape[1])*self.dim_range != B.shape[1]:
raise ValueError(B1.shape,B.shape)
_B = np.zeros((B1.shape[0]*2,B1.shape[1]*self.dim_range),B1.dtype)
for j in range(B1.shape[1]):
Avees = B1[:,j] # length=self.nC*self.nHomoCoo
arr=Avees.reshape(self.nC,self.nHomoCoo)
for k in range(self.dim_range):
arr2=np.hstack([arr if m==k else np.zeros_like(arr) for m in range(self.dim_range)])
arr3=arr2.reshape(self.nC,self.lengthAvee)
arr4=arr3.flatten()
_B[:,j+k*B1.shape[1]]=arr4
if debug_cont_constraints_are_separable:
if B.shape != _B.shape:
raise ValueError(B.shape,_B.shape)
B=_B
else:
if tess != 'I':
raise NotImplementedError
B = None
if cont_constraints_are_separable:
L=Lx
constraintMat=sparse.csr_matrix(L)
Pkl.dump(self.filename_subspace,{'B':B,'cont_constraints_are_separable':cont_constraints_are_separable,
'nConstraints':nConstraints,
'nEdges':nEdges,
'constraintMat':constraintMat},
override=True)
# Since B encodes the null space of L, it follows that
# np.allclose(L.dot(B), 0) == True
super(CpaSpace,self).__finish_init__(tessellation=tessellation,
constraintMat=constraintMat,
nConstraints=nConstraints,
nInterfaces=nEdges,
B=B,zero_vals=zero_vals)
self.cont_constraints_are_separable=cont_constraints_are_separable
self.x_dense = self._calcs.x_dense
self.x_dense_grid = self._calcs.x_dense_grid
self.x_dense_img = self._calcs.x_dense_img
self.x_dense_grid_img = self._calcs.x_dense_grid_img
self.grid_shape = self.x_dense_grid_img[0].shape
verts=self.tessellation.cells_verts_homo_coo
if 0: # testing
for i in range(0,self.nC):
for j in range(0,i):
verts1=verts[i]
verts2=verts[j]
shared=[]
for v1 in verts1:
for v2 in verts2:
if (v1==v2).all():
shared.append(v1)
shared = np.asarray(shared).T
if len(shared)==0:
continue
# theta =self.get_zeros_theta()
for m in range(self.d):
# theta[j]=1
Avees=self.get_zeros_PA()
Avees[:]=self.B[:,m]
# self.theta2Avees(Avees=Avees,theta=theta)
As=self.Avees2As(Avees=Avees)
Ai=As[i]
Aj=As[j]
#Ai.dot(shared) is 3 x 3 = dim x #verts_per_side
# At the moment, the problem is w/ the last entry of the 4 vert (100,100,0,1)
if not np.allclose((Ai-Aj).dot(shared),0):
ipshell('FAILED ALL CLOSE TEST')
raise ValueError
def get_x_dense(self):
return self.x_dense
def get_x_dense_grid(self):
return self.x_dense_grid
def get_x_dense_img(self):
return self.x_dense_img
def get_x_dense_grid_img(self):
return self.x_dense_grid_img
def __repr__(self):
s = "cpa space (tess type {}):".format(self.tess)
s += '\n\tCells: {}x{} (nC={})'.format(self.tessellation.nCx,self.tessellation.nCy,self.tessellation.nC)
s += '\n\td: {} D: {}'.format(self.d,self.D)
if any(self.zero_v_across_bdry):
if not all(self.zero_v_across_bdry):
raise NotImplementedError("Mixed bdry types")
s += '\n\tzero bdry cond: True'
s += '\n\tvolume-preserving: {}'.format(self.vol_preserve)
if self.tess=='I':
s+='\n\tvalid extension: {}'.format(self.valid_outside)
return s
def calc_tess(self,permute=False):
raise ObsoleteError
pts = self.get_x_dense_img()
cell_idx = np.empty(len(pts),dtype=np.int32)
self.calc_cell_idx(pts,cell_idx)
if permute:
p=np.random.permutation(self.nC)
cell_idx2=np.zeros_like(cell_idx)
for c in range(self.nC):
cell_idx2[cell_idx==c]=p[c]
cell_idx=cell_idx2
if self.XMINS.any():
raise NotImplementedError
Nx,Ny=self.XMAXS
img_idx=cell_idx.reshape(Ny,Nx)
return img_idx
def quiver(self,x,v,scale,ds=16,color='k',negate_vy=False,pivot='middle',
head=True,width=None):
"""
If width is None, its its value will be dictated by the value of
head
"""
if head:
headlength=5
headwidth=3
headaxislength=4.5
if width is None:
width=.005
else:
headlength=0
headwidth=0
headaxislength=0
if width is None:
width=.003
if x is None:
raise ValueError
# if x is None:
# x=self.xx
# y=self.yy
# if x.size != v[:,0].size:
# x=self.x_img
# y=self.y_img
#
# else:
# if x.ndim != 2:
# raise ValueError(x.shape)
# if x.shape[1]!=2:
# raise ValueError(x.shape)
#
# x,y=x[:,0].copy(),x[:,1].copy()
# if x.size != v[:,0].size:
# raise ValueError(x.shape,v.shape)
if x.size != v.size:
raise ValueError(x.shape,v.shape)
if v.ndim != 2:
raise ValueError(v.shape)
if v.shape[1]!=2:
raise ValueError(v.shape)
if x.shape != v.shape:
if x.ndim !=3 or x.shape[0]!=2:
raise ValueError(x.shape)
# x = np.asarray([x[0].flatten(),x[1].flatten()]).T
v = np.asarray([v.cpu[:,0].reshape(x.shape[1],x.shape[2]),
v.cpu[:,1].reshape(x.shape[1],x.shape[2])])
if x.shape != v.shape:
raise ValueError(x.shape,v.shape)
# if x.ndim != 2:
# raise ValueError(x.shape)
# if y.ndim != 2:
# raise ValueError(x.shape)
# try:
# vx = v[:,0].reshape(x.shape)
# vy = v[:,1].reshape(x.shape)
# except:
# raise ValueError(v.shape,x.shape)
# if x.shape[1]!=2:
# raise NotImplementedError(x.shape)
# if v.shape[1]!=2:
# raise NotImplementedError(x.shape)
if x.ndim !=3 and x.shape[1]!=2:
raise ValueError(x.shape)
if v.ndim !=3 and v.shape[1]!=2:
raise ValueError(v.shape)
# _x,_y = x.T
# vx,vy = v.T
if x.ndim == 2:
_x,_y = x.T
_u,_v = v.T
else:
_x,_y = x
_u,_v = v
if negate_vy:
_v = -_v
# print scale,ds
# 1/0
if _x.ndim==2:
plt.quiver(_x[::ds,::ds],_y[::ds,::ds],_u[::ds,::ds],_v[::ds,::ds],
angles='xy', scale_units='xy',scale=scale,
pivot=pivot,
color=color,
headlength=headlength,
headwidth=headwidth,
headaxislength=headaxislength,
width=width
)
else:
plt.quiver(_x[::ds],_y[::ds],_u[::ds],_v[::ds],
angles='xy', scale_units='xy',scale=scale,
pivot=pivot,
color=color,
headlength=headlength,
headwidth=headwidth,
headaxislength=headaxislength,
width=width
)
def plot_cells(self,color='k',lw=0.5,offset=(0,0)):
ox,oy=offset
if self.tess == 'II':
for c in xrange(self.nC):
xmin,ymin=self._xmins[c]
xmax,ymax=self._xmaxs[c]
# if (xmin == self.XMINS[0] or
# ymin == self.XMINS[1] or
# xmax == self.XMAXS[0] or
# ymax == self.XMAXS[1]):
# plt.plot([xmin,xmax,xmax,xmin,xmin],
# [ymin,ymin,ymax,ymax,ymin], color=color,lw=lw*10)
# else:
plt.plot(np.asarray([xmin,xmax,xmax,xmin,xmin])+ox,
np.asarray([ymin,ymin,ymax,ymax,ymin])+oy, color=color,lw=lw)
else:
for c in xrange(self.nC):
verts=self.tessellation.cells_verts_homo_coo[c,:,:-1]
x=np.asarray([verts[0,0],verts[1,0],verts[2,0],verts[0,0]])
y=np.asarray([verts[0,1],verts[1,1],verts[2,1],verts[0,1]])
plt.plot(x+ox,y+oy, color=color,lw=lw)
def inbound(self,x,i_c,out):
"""
Assumed:
x is 2xnPts
i_c is the index of the cell in question.
Checks, for each element of x, whether it is in the i_c cell.
Result is computed in-place in the last input argument.
"""
raise ObsoleteError("Use compute_inbound instead")
if __name__ == '__main__':
import pylab
from pylab import plt
import of.plt
from cpa.prob_and_stats.CpaCovs import CpaCovs
from cpa.prob_and_stats.cpa_simple_mean import cpa_simple_mean
from cpa.cpa2d.calcs import *
from of import my_mayavi
from mayavi.mlab import mesh
if computer.has_good_gpu_card:
pylab.ion()
# plt.close('all')
plt.clf()
XMINS=[0,0]
XMAXS=[512,512]
# XMAXS=[256,256]
# XMAXS=[256/2,256/2]
nCx,nCy=1,1
nCx,nCy=2,2
# nCx,nCy=3,3
#
## nCx,nCy=10,3
nCx,nCy=3,3
# nCx,nCy=4,4
# nCx,nCy=3,3
# nCx,nCy=6,6
#### nCx,nCy=7,7
# nCx,nCy=16,16
# nCx,nCy=8,8
### nCx,nCy=9,9
# nCx,nCy=10,10
##
# nCx,nCy=16,16
# nCx,nCy=16,16
# nCx,nCy=8,8
tess=['II','I'][1]
if 1 and computer.has_good_gpu_card:
if tess == 'II':
nCx,nCy=16,16
if tess == 'I':
nCx,nCy=8,8
nCx,nCy=16,16
# nCx,nCy=10,10
# nCx,nCy=1,1
# nCx,nCy=6,6 # for tri, this doesn't work well
# nCx,nCy=7,7
# nCx,nCy=8,8
zero_v_across_bdry=[True,True]
zero_v_across_bdry=[False,False]
# zero_v_across_bdry=[True,True]
#
vol_preserve = [False,True][0]
warp_around = [False]*2
Nx=XMAXS[0]
Ny=XMAXS[1]
config_plt = ConfigPlt(Nx=Nx,Ny=Ny)
Ngrids= [ Nx , Ny]
cpa_calcs=CpaCalcs(XMINS=XMINS,XMAXS=XMAXS,Ngrids=Ngrids,use_GPU_if_possible=True)
cpa_space=CpaSpace(XMINS,XMAXS,[nCx,nCy],zero_v_across_bdry,vol_preserve,
warp_around,
cpa_calcs=cpa_calcs,
# zero_vals=[(0,1)],
tess=tess,
valid_outside=0)
del cpa_calcs
if cpa_space.d==0:
raise ValueError('dim is 0')
print cpa_space
cpa_covs = CpaCovs(cpa_space,scale_spatial=1.0 * 1*10*0,
scale_value=0.01*10*2*4*10/100,
left_blk_rel_scale=1.0/100,
right_vec_scale=1)
mu = cpa_simple_mean(cpa_space)
Avees=cpa_space.theta2Avees(mu)
np.random.seed(10)
theta = np.random.multivariate_normal(mean=mu,cov=cpa_covs.cpa_cov)
cpa_space.theta2Avees(theta,Avees)
cpa_space.update_pat(Avees=Avees)
pts=CpuGpuArray(cpa_space.x_dense_img)
# yy,xx=np.mgrid[-100:cpa_space.XMAXS[1]+100:1,
# -100:cpa_space.XMAXS[0]+100:1]
# pts = np.vstack([xx.flatten(),yy.flatten()]).T.copy().astype(np.float)
cell_idx = CpuGpuArray.zeros(len(pts),dtype=np.int32)
cpa_space.calc_cell_idx(pts,cell_idx)
cell_idx.gpu2cpu()
v_dense = CpuGpuArray.zeros_like(pts)
print 'calc v:'
tic = time.clock()
cpa_space.calc_v(pts=pts,out=v_dense)
toc = time.clock()
print 'time', toc-tic
params_flow_int = get_params_flow_int()
# params_flow_int.nTimeSteps *=10
params_flow_int.dt *=100
params_flow_int.nStepsODEsolver=10
src = CpuGpuArray(cpa_space.x_dense_img)
transformed = CpuGpuArray.empty_like(src)
print params_flow_int
print '#pts=',len(pts)
tic=time.clock()
cpa_space.calc_T_fwd(pts=src,out=transformed,**params_flow_int)
toc = time.clock()
print "time (done in gpu, not cpu/gpu transfer')",toc-tic
v_dense.gpu2cpu() # for display
pts.gpu2cpu() # for display
# ds=16
ds=8
pts0 = cpa_space.x_dense_grid_img[:,::ds,::ds].reshape(cpa_space.dim_domain,-1).T
pts0 = CpuGpuArray(pts0.copy())
1/0
trajs_full = cpa_space.calc_trajectory(pts=pts0,mysign=1,**params_flow_int)
# v_at_trajs_full = np.zeros_like(trajs_full)
# for _pts,_v in zip(trajs_full,v_at_trajs_full):
# cpa_space.calc_v(pat=pat, pts=_pts, out=_v)
pts_grid=cpa_space.x_dense_grid_img
# pts_grid = np.asarray([xx,yy]).copy()
grid_shape = pts_grid[0].shape
fig = plt.figure()
plt.subplot(234)
# plt.imshow(cell_idx.reshape(Ny,Nx))
plt.imshow(cell_idx.cpu.reshape(grid_shape))
plt.subplot(231)
scale=[2*30,1.5*4][vol_preserve]
cpa_space.quiver(pts_grid,v_dense,scale, ds=16/2)
config_plt()
plt.subplot(232)
plt.imshow(v_dense.cpu[:,0].reshape(grid_shape),interpolation='Nearest',
vmin=v_dense.cpu[:,:].min(),vmax=v_dense.cpu[:,:].max());plt.colorbar()
# cpa_space.plot_cells()
config_plt()
plt.subplot(233)
plt.imshow(v_dense.cpu[:,1].reshape(grid_shape),interpolation='Nearest',
vmin=v_dense.cpu[:,:].min(),vmax=v_dense.cpu[:,:].max());plt.colorbar()
# cpa_space.plot_cells()
config_plt()
plt.subplot(235)
plt.imshow(v_dense.cpu[:,0].reshape(grid_shape),interpolation='Nearest',
vmin=v_dense.cpu[:,:].min(),vmax=v_dense.cpu[:,:].max());plt.colorbar()
cpa_space.plot_cells(color='k')
config_plt()
plt.subplot(236)
plt.imshow(v_dense.cpu[:,1].reshape(grid_shape),interpolation='Nearest',
vmin=v_dense.cpu[:,:].min(),vmax=v_dense.cpu[:,:].max());plt.colorbar()
cpa_space.plot_cells(color='k')
config_plt()
# 1/0
if 0:
my_mayavi.mayavi_mlab_close_all()
xx=cpa_space.x_dense_grid_img[0]
yy=cpa_space.x_dense_grid_img[1]
my_mayavi.mayavi_mlab_figure_bgwhite('vx')
mesh(xx,yy,0 *xx,opacity=0.25)
mesh(xx,yy,v_dense[:,0].reshape(xx.shape))
my_mayavi.mayavi_mlab_figure_bgwhite('vy')
mesh(xx,yy,0 *xx,opacity=0.25)
mesh(xx,yy,v_dense[:,1].reshape(xx.shape))
# plt.figure()
# i = 317
# cpa_space.quiver(trajs_full[:,i],v_at_trajs_full[:,i],scale=10, ds=10)
# cpa_space.quiver(trajs_full.reshape(-1,2),v_at_trajs_full.reshape(-1,2),scale=20, ds=10)
# config_plt()
# for t in range(1,params_flow_int.nTimeSteps+1,5):
for t in [params_flow_int.nTimeSteps+1]:
break
print t
plt.clf()
trajs = trajs_full[:t].copy()
v_at_traj = v_at_trajs_full[t-1]
pts1=trajs[-1]
# v_at_T = cpa_space.calc_v(pat=pat,
# pts = pts1 ,
# out=None )
for num in [221,222,223,224]:
plt.subplot(num)
if num in [224]:
cpa_space.quiver(cpa_space.xx_img,v_dense,
# scale=[2*5,1.5*4][vol_preserve],
scale=[2*10,1.5*4][vol_preserve],
ds=16*2)
if num in [223]:
cpa_space.quiver(pts1,v_at_traj,scale=10, ds=1)
if num in [222]:
plt.plot(pts0[:,0],pts0[:,1],'ro',ms=1)
if num in [222,223]:
nTraj = trajs.shape[1]
for i in range(nTraj):
traj = trajs[:,i]
plt.plot(traj[:,0],traj[:,1],'b',lw=.5)
if num in [221,222]:
plt.plot(pts1[:,0],pts1[:,1],'go',ms=1)
config_plt()
if num==221:
# plt.title('T(x;t)')
plt.title(r"$T(x;t)$")
if num==222:
# plt.title("{T(x;t'): t' in [0,t]}")
plt.title(r"$\{T(x;\tau): \tau\in [0,t]\}$")
if num==223:
plt.title(r"$v(T(x;t))$")
if num == 224:
plt.title(r"$v(\cdot)$")
of.plt.maximize_figure()
fig_filename = (os.path.join(HOME,'tmp','{0:04}.png'.format(t)))
print fig_filename
plt.savefig(fig_filename,dpi=300)
if 0 and computer.has_good_gpu_card:
# ipshell('debug')
raw_input("Press Enter to finish.")
|
freifeld/cpabDiffeo
|
cpab/cpa2d/CpaSpace.py
|
Python
|
mit
| 27,047
|
[
"Mayavi"
] |
aa9880fd4dea6287cedbfe360cdbcebe8d0238dd221940b4a45c3788ce3c0bc3
|
# This script will calculate Shannon entropy from a MSA.
# Dependencies:
# Biopython, Seaborn, Matplotlib, Math
"""
Shannon's entropy equation (latex format):
H = -\sum_{i=1}^{M} P_i \log_2 P_i
Entropy is a measure of the uncertainty of a probability distribution (p1, ..... , pM)
https://stepic.org/lesson/Scoring-Motifs-157/step/7?course=Bioinformatics-Algorithms&unit=436
Where P_i is the fraction of bases (or residues) of type i, and M is the
number of base/residue types (A, T, G or C for nucleotides; 20 for amino acids).
H ranges from 0 (only one base/residue present at that position) to log2(M),
i.e. 2.0 for nucleotides and 4.322 for the 20 amino acids (all types equally
represented at that position).
Typically, positions with H > 2.0 are considered variable, whereas those with
H < 2.0 are considered conserved.
Highly conserved positions are those with H < 1.0 (Litwin and Jores, 1992).
A minimum number of sequences (~100) is, however, required for H to describe
the diversity of a protein family.
"""
import os
import sys
import warnings
import traceback
__author__ = "Joe R. J. Healey"
__version__ = "1.0.0"
__title__ = "ShannonMSA"
__license__ = "GPLv3"
__author_email__ = "J.R.J.Healey@warwick.ac.uk"
def parseArgs():
"""Parse command line arguments"""
import argparse
try:
parser = argparse.ArgumentParser(
description='Compute per base/residue Shannon entropy of a Multiple Sequence Alignment.')
parser.add_argument('-a',
'--alignment',
action='store',
required=True,
help='The multiple sequence alignment (MSA) in any of the formats supported by Biopython\'s AlignIO.')
parser.add_argument('-f',
'--alnformat',
action='store',
default='fasta',
help='Specify the format of the input MSA to be passed in to AlignIO.')
parser.add_argument('-v',
'--verbose',
action='count',
default=0,
help='Verbose behaviour, printing parameters of the script.')
parser.add_argument('-m',
'--runningmean',
action='store',
type=int,
default=0,
help='Return the running mean (a.k.a moving average) of the MSAs Shannon Entropy. Makes for slightly smoother plots. Providing the number of points to average over switches this on.')
parser.add_argument('--makeplot',
action='store_true',
help='Plot the results via Matplotlib.')
except:
print "An exception occurred with argument parsing. Check your provided options."
traceback.print_exc()
return parser.parse_args()
def parseMSA(msa, alnformat, verbose):
"""Parse in the MSA file using Biopython's AlignIO"""
from Bio import AlignIO
alignment = AlignIO.read(msa, alnformat)
# Do a little sanity checking:
seq_lengths_list = []
for record in alignment:
seq_lengths_list.append(len(record))
seq_lengths = set(seq_lengths_list)
if verbose > 0: print("Alignment length is:" + str(list(seq_lengths)))
if len(seq_lengths) != 1:
sys.stderr.write("Your alignment lengths aren't equal. Check your alignment file.")
sys.exit(1)
index = range(1, list(seq_lengths)[0]+1)
return alignment, list(seq_lengths), index
##################################################################
# Function to calcuate the Shannon's entropy per alignment column
# H=-\sum_{i=1}^{M} P_i\,log_2\,P_i (http://imed.med.ucm.es/Tools/svs_help.html)
# Gaps and N's are included in the calculation
##################################################################
def shannon_entropy(list_input):
"""Calculate Shannon's Entropy per column of the alignment (H=-\sum_{i=1}^{M} P_i\,log_2\,P_i)"""
import math
unique_base = set(list_input)
M = len(list_input)
entropy_list = []
# Number of residues in column
for base in unique_base:
n_i = list_input.count(base) # Number of residues of type i
P_i = n_i/float(M) # n_i(Number of residues of type i) / M(Number of residues in column)
entropy_i = P_i*(math.log(P_i,2))
entropy_list.append(entropy_i)
sh_entropy = -(sum(entropy_list))
return sh_entropy
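# Quick sanity check (illustrative, not part of the original script): a fully
# conserved column has zero entropy, an evenly split two-base column has 1 bit:
#   shannon_entropy(list("AAAA")) -> 0.0 (P_A = 1, so -1.0*log2(1.0) = 0)
#   shannon_entropy(list("AATT")) -> 1.0 (two bases at P = 0.5 each)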
def shannon_entropy_list_msa(alignment):
"""Calculate Shannon Entropy across the whole MSA"""
shannon_entropy_list = []
for col_no in xrange(len(list(alignment[0]))):
list_input = list(alignment[:, col_no])
shannon_entropy_list.append(shannon_entropy(list_input))
return shannon_entropy_list
def plot(index, sel, verbose):
""""Create a quick plot via matplotlib to visualise the extended spectrum"""
import matplotlib.pyplot as plt
if verbose > 0: print("Plotting data...")
plt.plot(index, sel)
plt.xlabel('MSA Position Index', fontsize=16)
plt.ylabel('Shannon Entropy', fontsize=16)
plt.show()
def running_mean(l, N):
sum = 0
result = list(0 for x in l)
for i in range( 0, N ):
sum = sum + l[i]
result[i] = sum / (i+1)
for i in range( N, len(l) ):
sum = sum - l[i-N] + l[i]
result[i] = sum / N
return result
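# Worked example (floats, matching the entropy values this is applied to):
#   running_mean([1.0, 2.0, 3.0, 4.0], 2) -> [1.0, 1.5, 2.5, 3.5]
# The first N points average over an expanding prefix; afterwards a fixed
# window of N points slides along the list.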
def main():
"""Compute Shannon Entropy from a provided MSA."""
# Parse arguments
args = parseArgs()
# Convert object elements to standard variables for functions
msa = args.alignment
alnformat = args.alnformat
verbose = args.verbose
makeplot = args.makeplot
runningmean = args.runningmean
# Start calling functions to do the heavy lifting
alignment, seq_lengths, index = parseMSA(msa, alnformat, verbose)
sel = shannon_entropy_list_msa(alignment)
if runningmean > 0:
sel = running_mean(sel, runningmean)
if makeplot is True:
plot(index, sel, verbose)
if verbose > 0: print("Index" + '\t' + "Entropy")
for c1, c2 in zip(index, sel):
print(str(c1) + '\t' + str(c2))
if __name__ == '__main__':
main()
|
MicroInfect/bioinfx
|
Shannon.py
|
Python
|
gpl-3.0
| 6,304
|
[
"Biopython"
] |
29fd5db820c0f8bc605871c377b47baf18008394d94943ab915cf5a8029f4be6
|
#=========================================================================
#
# Program: PXDMFReader Plugin
# Module: ReaderSync.py
#
# Copyright (c) GeM, Ecole Centrale Nantes.
# All rights reserved.
# Copyright: See COPYING file that comes with this distribution
#
#
# This software is distributed WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the above copyright notice for more information.
#
#=========================================================================
from paraview.simple import *
import paraview.vtk
class ReaderSync():
""" Class to synchronize PXDMF Readers
import ReaderSync
Pxdmfsync = ReaderSync.ReaderSync()
Pxdmfsync.SetFixedDimension('Amp', 1.2)
Pxdmfsync.SetFixedDimensionPer('DX', 0.2)
"""
def __init__(self):
self.UpdateSources()
self.printstate = 0;
def UpdateSources(self):
"""Update the internal list of PXDMF Readers"""
self.mins = {}
self.maxs = {}
self.sources = GetSources()
self.nbSources = len(self.sources)
self.PXDMFReaders = []
for i in range(self.nbSources):
mysource = self.sources[self.sources.keys()[i]]
if(isinstance(mysource, paraview.servermanager.sources.PXDMFReader )):
self.PXDMFReaders.append(mysource)
rmins = mysource.GetPropertyValue('PXDMFDimsMinRangeDataInfo')
rmaxs = mysource.GetPropertyValue('PXDMFDimsMaxRangeDataInfo')
names = mysource.GetPropertyValue('PXDMFDimsNameDataInfo')
nbdims = len(names)
for dim in range(nbdims):
if self.mins.has_key(names[dim]):
self.mins[names[dim]] = min(self.mins[names[dim]],rmins[dim])
self.maxs[names[dim]] = min(self.maxs[names[dim]],rmaxs[dim])
else:
self.mins[names[dim]] = rmins[dim]
self.maxs[names[dim]] = rmaxs[dim]
def SetFixedDimension(self, name, value):
"""Set the value of a fixed coordinate """
if not self.mins.has_key(name):
return
for mysource in self.PXDMFReaders:
fixeddims = mysource.GetProperty('FixedDimensions')
names = mysource.GetPropertyValue('PXDMFDimsNameDataInfo')
for dim in range(len(names)):
if names[dim] == name:
fixeddims[dim] = value
def SetFixedDimensionPer(self, name, value):
"""Set the value of a fixed coordinate using percentage. value must be between 0 and 1 """
if not self.mins.has_key(name):
return
for mysource in self.PXDMFReaders:
fixeddims = mysource.GetProperty('FixedDimensions')
names = mysource.GetPropertyValue('PXDMFDimsNameDataInfo')
for dim in range(len(names)):
if names[dim] == name:
fixeddims[dim] = value*(self.maxs[name]-self.mins[name])+self.mins[name]
def SetFixedDimensionIndexPer(self, index, value):
"""Set the value of a fixed coordinate using percentage. value must be between 0 and 1 """
if len(self.mins)<= index:
return
name = self.mins.items()[index][0]
for mysource in self.PXDMFReaders:
fixeddims = mysource.GetProperty('FixedDimensions')
names = mysource.GetPropertyValue('PXDMFDimsNameDataInfo')
for dim in range(len(names)):
if names[dim] == name:
fixeddims[dim] = value*(self.maxs[name]-self.mins[name])+self.mins[name]
def UpdatePipeline(self):
"""Call UpdatePipeline() on each register PXDMFReader"""
for reader in self.PXDMFReaders:
reader.UpdatePipeline()
def GetState(self):
"""To recover the state off all the coordinate in a text form. must set the printstate variable to 1 first
>>> Pxdmfsync.printstate = 1
>>> Pxdmfsync.GetState()
'STATE;:Y:0.0:1.0:X:0.0:3.0;FIN'
"""
if self.printstate:
self.UpdateSources();
keys = self.maxs.keys()
res = "STATE;"
for key in keys :
res += ":"+key +":" + str(self.mins[key])+ ":" + str(self.maxs[key])
res += ";FIN\n"
self.printstate = 0;
return res
return ""
def pvrotate(x,y):
"""
Point of view rotation in degree
note: Render() must be called to redraw the scene
"""
renView = GetRenderView()
transform = paraview.vtk.vtkTransform()
camera = GetActiveCamera()
renderer = renView.GetRenderer()
scale = paraview.vtk.vtkMath.Norm(camera.GetPosition())
if scale <= 0.0:
scale = paraview.vtk.vtkMath.Norm(camera.GetFocalPoint())
if scale <= 0.0:
scale = 1.0
FPoint = camera.GetFocalPoint()
FPoint0 = FPoint[0]
FPoint1 = FPoint[1]
FPoint2 = FPoint[2]
camera.SetFocalPoint(FPoint0/scale,FPoint1/scale,FPoint2/scale)
PPoint = camera.GetPosition()
PPoint0 = PPoint[0]
PPoint1 = PPoint[1]
PPoint2 = PPoint[2]
camera.SetPosition(PPoint0/scale,PPoint1/scale,PPoint2/scale)
center = renView.CenterOfRotation
Center0 = center[0]
Center1 = center[1]
Center2 = center[2]
renderer.SetWorldPoint(Center0,Center1,Center2,1.0)
renderer.WorldToDisplay()
v2 = [0,0,0]
transform.Identity()
transform.Translate(Center0/scale,Center1/scale,Center2/scale)
camera.OrthogonalizeViewUp()
viewUp = camera.GetViewUp()
viewUp0 = viewUp[0]
viewUp1 = viewUp[1]
viewUp2 = viewUp[2]
size = renderer.GetSize()
transform.RotateWXYZ(360.0 * x / size[0], viewUp0, viewUp1, viewUp2)
paraview.vtk.vtkMath.Cross(camera.GetDirectionOfProjection(), viewUp, v2)
transform.RotateWXYZ(-360.0 * y / size[1], v2[0], v2[1], v2[2])
transform.Translate(-Center0/scale,-Center1/scale,-Center2/scale)
camera.ApplyTransform(transform)
camera.OrthogonalizeViewUp()
FPoint = camera.GetFocalPoint()
FPoint0 = FPoint[0]
FPoint1 = FPoint[1]
FPoint2 = FPoint[2]
camera.SetFocalPoint(FPoint0*scale,FPoint1*scale,FPoint2*scale)
PPoint = camera.GetPosition()
PPoint0 = PPoint[0]
PPoint1 = PPoint[1]
PPoint2 = PPoint[2]
camera.SetPosition(PPoint0*scale,PPoint1*scale,PPoint2*scale)
renderer.ResetCameraClippingRange()
def pvzoom(a):
"""
Zoom: a negative number gets closer, a positive one gets further away
note: Render() must be called to redraw the scene
"""
RenderView1 = GetRenderView();
Fp = RenderView1.CameraFocalPoint;
Cp = RenderView1.CameraPosition;
Cvu = RenderView1.CameraViewUp;
Ld = [Fp[0]-Cp[0],Fp[1]-Cp[1],Fp[2]-Cp[2]];
RenderView1.CameraPosition[0] = RenderView1.CameraPosition[0]-Ld[0]*a*0.2
RenderView1.CameraPosition[1] = RenderView1.CameraPosition[1]-Ld[1]*a*0.2
RenderView1.CameraPosition[2] = RenderView1.CameraPosition[2]-Ld[2]*a*0.2
GetRenderView().GetRenderer().ResetCameraClippingRange()
def pvdolly(a):
"""
Zoom: a positive number gets closer, a negative one gets further away
note: Render() must be called to redraw the scene
"""
camera = GetActiveCamera()
renView = GetRenderView()
renderer = renView.GetRenderer()
FPoint = camera.GetFocalPoint()
FPoint0 = FPoint[0]
FPoint1 = FPoint[1]
FPoint2 = FPoint[2]
PPoint = camera.GetPosition()
PPoint0 = PPoint[0]
PPoint1 = PPoint[1]
PPoint2 = PPoint[2]
Norm = camera.GetDirectionOfProjection()
Norm0 = Norm[0]
Norm1 = Norm[1]
Norm2 = Norm[2]
size = renderer.GetSize()
rng = GetRenderView().CameraClippingRange
ZoomScale = 1.5 * rng[1] / size[1]
k = a * ZoomScale
temp = k * Norm0
PPoint0 += temp
FPoint0 += temp
temp = k * Norm1
PPoint1 += temp
FPoint1 += temp
temp = k * Norm2
PPoint2 += temp
FPoint2 += temp
camera.SetFocalPoint(FPoint0, FPoint1, FPoint2)
camera.SetPosition(PPoint0, PPoint1, PPoint2)
renderer.ResetCameraClippingRange()
def pvpan(x,y):
"""
Pan : xpan, ypan
note: Render() must be called to redraw the scene
"""
renView = GetRenderView()
camera = GetActiveCamera()
(FPoint0,FPoint1,FPoint2) = camera.GetFocalPoint()
(PPoint0,PPoint1,PPoint2) = camera.GetPosition()
renderer = renView.GetRenderer()
renderer.SetWorldPoint(FPoint0, FPoint1, FPoint2, 1.0)
renderer.WorldToDisplay()
DPoint = renderer.GetDisplayPoint()
focalDepth = DPoint[2]
(centerX,centerY) = renView.GetRenderWindow().GetSize()
APoint0 = centerX/2.0 + x
APoint1 = centerY/2.0 + y
renderer.SetDisplayPoint(APoint0, APoint1, focalDepth)
renderer.DisplayToWorld()
(RPoint0,RPoint1,RPoint2,RPoint3) = renderer.GetWorldPoint()
if RPoint3 != 0.0:
RPoint0 = RPoint0/RPoint3
RPoint1 = RPoint1/RPoint3
RPoint2 = RPoint2/RPoint3
camera.SetFocalPoint( (FPoint0-RPoint0)/2.0 + FPoint0,
(FPoint1-RPoint1)/2.0 + FPoint1,
(FPoint2-RPoint2)/2.0 + FPoint2)
camera.SetPosition( (FPoint0-RPoint0)/2.0 + PPoint0,
(FPoint1-RPoint1)/2.0 + PPoint1,
(FPoint2-RPoint2)/2.0 + PPoint2)
renderer.ResetCameraClippingRange()
def pvroll(x):
"""
roll : rotation in degree
note: Render() must be called to redraw the scene
"""
renView = GetRenderView()
camera = GetActiveCamera()
renderer = renView.GetRenderer()
FPoint = camera.GetFocalPoint()
FPoint0 = FPoint[0]
FPoint1 = FPoint[1]
FPoint2 = FPoint[2]
PPoint = camera.GetPosition()
PPoint0 = PPoint[0]
PPoint1 = PPoint[1]
PPoint2 = PPoint[2]
Axis0 = FPoint0 - PPoint0
Axis1 = FPoint1 - PPoint1
Axis2 = FPoint2 - PPoint2
center = renView.CenterOfRotation
Center0 = center[0]
Center1 = center[1]
Center2 = center[2]
renderer.SetWorldPoint(Center0,Center1,Center2,1.0)
renderer.WorldToDisplay()
transform = paraview.vtk.vtkTransform()
transform.Identity()
transform.Translate(Center0,Center1,Center2)
transform.RotateWXYZ(x, Axis0, Axis1, Axis2)
transform.Translate(-Center0,-Center1,-Center2)
camera.ApplyTransform(transform)
camera.OrthogonalizeViewUp()
renderer.ResetCameraClippingRange()
|
aleygue/PxdmfSuite
|
ParaviewPXDMFReader/PGDTools/ReaderSync.py
|
Python
|
bsd-3-clause
| 9,862
|
[
"ParaView",
"VTK"
] |
efac9ece5ac20e51ab1a62ec0ea98bd1fc9bebf5488628c3b70b3f1c3edff733
|
import numpy as np
import math
import sys
sys.path.insert(0,'../..')
import os
import classifier_eval_simplified
from sklearn import tree
from sklearn.ensemble import AdaBoostClassifier
from sklearn.svm import SVC
for dim in range(9,11):
comp_file_list=[]
####################################################################
# Gaussian samples operation
####################################################################
for i in range(1,101):
comp_file_list.append((os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/higher_dimensional_gauss/gauss_data/data_high" +str(dim)+"Dgauss_10000_0.5_0.1_0.0_{0}.txt".format(i),os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/higher_dimensional_gauss/gauss_data/data_high"+str(dim)+"Dgauss_10000_0.5_0.1_0.01_{0}.txt".format(i)))
#clf = tree.DecisionTreeClassifier('gini','best',37, 89, 1, 0.0, None)
clf = AdaBoostClassifier(base_estimator=tree.DecisionTreeClassifier(max_depth=2), learning_rate=0.01,n_estimators=983)
#clf = SVC(C=params['aC'],gamma=params['agamma'],probability=True, cache_size=7000)
args=[str(dim)+ "Dgauss_bdt_AD","particle","antiparticle",100,comp_file_list,1,clf,np.logspace(-2, 10, 13),np.logspace(-9, 3, 13),1]
#For nn:
#args=[str(dim)+"Dgauss_nn","particle","antiparticle",100,comp_file_list,1,clf,np.logspace(-2, 10, 13),np.logspace(-9, 3, 13),params['dimof_middle'],params['n_hidden_layers']]
####################################################################
classifier_eval_simplified.classifier_eval(0,0,args)
|
weissercn/MLTools
|
Dalitz_simplified/evaluation_of_optimised_classifiers/bdt_gauss/bdt_Gauss_evaluation_of_optimised_classifiers.py
|
Python
|
mit
| 1,571
|
[
"Gaussian"
] |
c70c782ae6d71f4fc6b664b0ec8ebed91363f62c297cf126cc00cc0b8678b530
|
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
r"""Fine-tune gs://vmoe_checkpoints/vmoe_b16_imagenet21k_randaug_strong on CIFAR10.
Test accuracy (mean over 3 runs with different fine-tuning seeds): 98.7%.
The accuracy is not SOTA. This config file was designed to easily fit on a small
TPUv2-8 or TPUv3-8, and fine-tune in about 10 minutes (TPUv3-8).
"""
# pylint: enable=line-too-long
import ml_collections
# Paths to manually downloaded datasets and to the tensorflow_datasets data dir.
TFDS_MANUAL_DIR = None
TFDS_DATA_DIR = None
# The following configuration was made to fit on TPUv3-32. The number of images
# per device has to be at least 32.
BATCH_SIZE = 1024 # Number of images processed in each step.
NUM_CLASSES = 10 # Number of CIFAR10 classes.
IMAGE_SIZE = 128 # Image size as input to the model.
PATCH_SIZE = 16 # Patch size.
NUM_LAYERS = 12 # Number of encoder blocks in the transformer.
NUM_EXPERTS = 8 # Number of experts in each MoE layer.
NUM_SELECTED_EXPERTS = 2 # Maximum number of selected experts per token.
# For efficiency reasons, the tokens are divided into several groups of the
# following size. The routing is performed independently on each group.
# For the same reason, the group size should be a divisor of the number of
# tokens in each device. The resulting number of groups MUST be a multiple of
# the number of experts.
GROUP_SIZE = 8 * ((IMAGE_SIZE // PATCH_SIZE)**2 + 1)
# This is the number of tokens that are processed per expert per group.
# It gives some slack over the expected number of tokens per expert under
# perfectly balanced routing.
CAPACITY_SIZE_RATIO = 1.5
CAPACITY = int(GROUP_SIZE * CAPACITY_SIZE_RATIO * NUM_SELECTED_EXPERTS //
NUM_EXPERTS)
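# Worked numbers for the constants above (an added sanity check, not part of
# the original file): GROUP_SIZE = 8 * ((128 // 16)**2 + 1) = 8 * 65 = 520 and
# CAPACITY = int(520 * 1.5 * 2 // 8) = 195 tokens per expert per group.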
def get_config():
"""Fine-tune gs://vmoe_checkpoints/vmoe_b16_imagenet21k_randaug_strong on CIFAR10."""
config = ml_collections.ConfigDict()
config.dataset = ml_collections.ConfigDict()
pp_common = f'value_range(-1,1)|onehot({NUM_CLASSES}, inkey="label", outkey="labels")|keep("image", "labels")'
# Dataset variation used for training.
config.dataset.train = get_data_params(
name='cifar10',
split='train[:98%]',
process=f'decode|inception_crop({IMAGE_SIZE})|flip_lr|{pp_common}',
shuffle_buffer=50_000,
cache=None)
# Dataset variation used for validation.
config.dataset.val = get_data_params(
name='cifar10',
split='train[98%:]',
process=f'decode|resize({IMAGE_SIZE})|{pp_common}',
shuffle_buffer=None,
cache='batched')
# Dataset variation used for test.
config.dataset.test = get_data_params(
name='cifar10',
split='test',
process=f'decode|resize({IMAGE_SIZE})|{pp_common}',
shuffle_buffer=None,
cache='batched')
# Loss used to train the model.
config.loss = ml_collections.ConfigDict()
config.loss.name = 'softmax_xent'
# Model parameters depend on the model type.
config.description = 'V-MoE-B/16, K=2, Every 2'
config.train_steps = 1_000
config.initialization = ml_collections.ConfigDict({
'name': 'initialize_from_vmoe_release',
'prefix': 'gs://vmoe_checkpoints/vmoe_b16_imagenet21k_randaug_strong',
'keep': ['head'],
})
config.model = ml_collections.ConfigDict({
'name': 'VisionTransformerMoe',
'num_classes': NUM_CLASSES,
'patch_size': (16, 16),
'hidden_size': 768,
'classifier': 'token',
'representation_size': None,
'head_bias_init': -10.0,
'encoder': {
'num_layers': NUM_LAYERS,
'num_heads': 12,
'mlp_dim': 3072,
'dropout_rate': 0.0,
'attention_dropout_rate': 0.0,
'moe': {
'num_experts': NUM_EXPERTS,
'group_size': GROUP_SIZE,
'layers': tuple(range(1, NUM_LAYERS, 2)),
'dropout_rate': 0.0,
'split_rngs': False, # All experts share initialization.
'router': {
'num_selected_experts': NUM_SELECTED_EXPERTS,
'noise_std': 1.0, # This is divided by NUM_EXPERTS.
'importance_loss_weight': 0.005,
'load_loss_weight': 0.005,
'dispatcher': {
'name': 'einsum',
'bfloat16': True,
'capacity': CAPACITY,
# This is used to hint pjit about how data is distributed
# at the input/output of each MoE layer.
# This value means that the tokens are partitioned across
# all devices in the mesh (i.e. fully data parallelism).
'partition_spec': (('expert', 'replica'),),
# We don't use batch priority for training/fine-tuning.
'batch_priority': False,
},
},
},
},
})
config.optimizer = ml_collections.ConfigDict({
'name': 'sgd',
'momentum': 0.9,
'accumulator_dtype': 'float32',
'learning_rate': {
'schedule': 'warmup_cosine_decay',
'peak_value': 0.0015,
'end_value': 1e-5,
'warmup_steps': 100,
},
'gradient_clip': {'global_norm': 10.0},
})
# These control how the model parameters are partitioned across the device
# mesh for running the models efficiently.
# By setting num_expert_partitions = num_experts, we set at most one expert on
# each device.
config.num_expert_partitions = config.model.encoder.moe.num_experts
# This value specifies that the first axis of all parameters in the MLPs of
# MoE layers (which has size num_experts) is partitioned across the 'expert'
# axis of the device mesh.
config.params_axis_resources = [('Moe/Mlp/.*', ('expert',))]
config.extra_rng_keys = ('dropout', 'gating')
# Write checkpoints every 1000 steps.
config.save_checkpoint = ml_collections.ConfigDict()
config.save_checkpoint.every_steps = 1_000
config.save_checkpoint.keep_last = 1
config.save_checkpoint.num_shards = 32 # Target number of checkpoint shards.
config.save_checkpoint.wait_seconds = 1.0
# Report training progress every minute.
config.report_progress = ml_collections.ConfigDict()
config.report_progress.every_secs = None
config.report_progress.every_steps = 100
# Evaluate on the validation set every 1000 steps.
config.evaluate = ml_collections.ConfigDict()
config.evaluate.every_steps = 100
# Run device profiling on process_index = 0, for 5 steps, starting at step 10.
# Then repeat profiling every hour.
config.profile = ml_collections.ConfigDict()
config.profile.all_processes = False
config.profile.num_profile_steps = 5
config.profile.first_profile = 10
config.profile.every_secs = 3600.0
config.seed = 0
return config
def get_data_params(name, split, process, shuffle_buffer, cache):
"""Returns dataset parameters."""
config = ml_collections.ConfigDict()
config.name = name
config.split = split
config.process = process
config.batch_size = BATCH_SIZE
config.prefetch = 'autotune'
config.prefetch_device = 2
config.data_dir = TFDS_DATA_DIR
config.manual_dir = TFDS_MANUAL_DIR
if shuffle_buffer:
config.shuffle_buffer = shuffle_buffer
if cache:
config.cache = cache
return config
def get_hyper(hyper):
return hyper.sweep('config.seed', list(range(3)))
|
google-research/vmoe
|
vmoe/configs/vmoe_b16_imagenet21k_randaug_strong_ft_cifar10.py
|
Python
|
apache-2.0
| 7,959
|
[
"MOE"
] |
e6e8cfa48bd0ec77b8c731f1671fe94bd101eefd62cec13d76ce0e5d692f51d5
|
import _gfrd
import myrandom
import vtk
m = _gfrd.Model()
S0 = m.new_species_type()
S0['D'] = '.01'
S0['radius'] = '.01'
S0['surface'] = 'default'
S1 = m.new_species_type()
S1['D'] = '.01'
S1['radius'] = '.01'
S1['surface'] = 'default'
S2 = m.new_species_type()
S2['D'] = '.01'
S2['radius'] = '.01'
S2['surface'] = 'default'
colors = {
S0.id: (1., 0., 0.),
S1.id: (0., 1., 0.),
S2.id: (1., 1., 0.),
}
rr = _gfrd.ReactionRule((S0, S1), (S2, ))
rr['k'] = '.01'
m.network_rules.add_reaction_rule(rr)
nrw = _gfrd.NetworkRulesWrapper(m.network_rules)
class MyParticleContainer(_gfrd._ParticleContainer):
def __init__(self, world_size):
_gfrd._ParticleContainer.__init__(self)
self.particles = {}
self.surfaces = {}
self.species = {}
self.pidgen = _gfrd.ParticleIDGenerator(0)
self.world_size = world_size
def add_surface(self, surface):
self.surfaces[surface.id] = surface
def add_species(self, species):
self.species[species.id] = species
def get_surface(self, id):
return self.surfaces[id]
def get_species(self, id):
if isinstance(id, _gfrd.SpeciesType):
id = id.id
return self.species[id]
def new_particle(self, species_id, position):
new_pid = self.pidgen()
species = self.get_species(species_id)
retval = (new_pid, _gfrd.Particle(position, species.radius, species.D, species.id))
self.update_particle(retval)
return retval
def update_particle(self, pid_particle_pair):
self.particles[pid_particle_pair[0]] = pid_particle_pair[1]
return False
def remove_particle(self, pid):
del self.particles[pid]
def get_particle(self, pid):
p = self.particles.get(pid, None)
if p is None:
            raise KeyError(pid)  # 'NotFound' was undefined in the original
return pid, p
def check_overlap(self, sphere, ignores):
retval = []
for pp in self.particles.iteritems():
if pp[0] in ignores:
continue
dist = _gfrd.distance(pp[1].position, sphere.position) - pp[1].radius
if dist < sphere.radius:
retval.append((pp, dist))
retval.sort(lambda a, b: cmp(a[1], b[1]))
return retval
def distance(self, x, y):
return _gfrd.distance_cyclic(x, y, self.world_size)
def apply_boundary(self, x):
return _gfrd.apply_boundary(x, self.world_size)
def cyclic_transpose(self, x, y):
return _gfrd.cyclic_transpose(x, y, self.world_size)
def __iter__(self):
return self.particles.iteritems()
def create_transaction(self):
return _gfrd.TransactionImpl(self)
w = MyParticleContainer(1.0)
region = _gfrd._CuboidalRegion("default",
_gfrd.Box((.5, .5, .5), (1., 0., 0.), (0., 1., 0.), (0., 0., 1.), 1., 1., .1))
w.add_surface(region)
for s in [S0, S1, S2]:
w.add_species(_gfrd.SpeciesInfo(s.id, float(s['D']), float(s['radius']), s['surface']))
for i in xrange(0, 300):
w.new_particle([S0, S1][i % 2],
[myrandom.uniform(), myrandom.uniform(), myrandom.uniform()])
wn = vtk.vtkRenderWindow()
iren = wn.MakeRenderWindowInteractor()  # renamed from 'int' to avoid shadowing the builtin
iren.Initialize()
iren.SetRenderWindow(wn)
r = vtk.vtkRenderer()
wn.AddRenderer(r)
actors = {}
def create_actor(pp):
s = vtk.vtkSphereSource()
s.SetRadius(pp[1].radius)
s.SetCenter(pp[1].position)
m = vtk.vtkPolyDataMapper()
m.SetInput(s.GetOutput())
a = vtk.vtkActor()
a.GetProperty().SetColor(colors[pp[1].sid])
a.SetMapper(m)
r.AddActor(a)
actors[pp[0]] = (s, a)
def update_actor(pp):
actors[pp[0]][0].SetCenter(pp[1].position)
def remove_actor(pp):
r.RemoveActor(actors[pp[0]][1])
del actors[pp[0]]
for pp in w:
create_actor(pp)
anim = []
def callback(*arg):
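    # One timer tick = one BD step: propagate all particles inside a
    # transaction, then sync the VTK actors with the particles that were
    # added, removed, or modified during the step.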
t = w.create_transaction()
particle_id_list = [pair[0] for pair in w]
propagator = _gfrd._BDPropagator(w, t, nrw, myrandom.rng, 1e-3, 100, particle_id_list)
propagator.propagate_all()
for pp in t.added_particles:
create_actor(pp)
s = vtk.vtkSphereSource()
s.SetCenter(pp[1].position)
s.SetRadius(.01)
m = vtk.vtkPolyDataMapper()
m.SetInput(s.GetOutput())
a = vtk.vtkActor()
a.GetProperty().SetColor((1., 1., 1.))
a.GetProperty().SetOpacity(.2)
a.SetMapper(m)
r.AddActor(a)
anim.append((1, s, a))
for pp in t.removed_particles:
remove_actor(pp)
for pp in t.modified_particles:
update_actor(pp)
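    # Age the white reaction "flash" spheres: each frame grows the radius and
    # fades the opacity, and after a few frames the actor is removed.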
l = len(anim)
j = 0
while j < l:
i, s, a = anim[j]
if i >= 4:
r.RemoveActor(a)
del anim[j]
l -= 1
continue
s.SetRadius(0.04 * i)
a.GetProperty().SetOpacity(.3 - .05 * i)
anim[j] = (i + 1, s, a)
j += 1
wn.Render()
del t
iren.CreateRepeatingTimer(100)
iren.AddObserver('TimerEvent', callback, .0)
iren.Start()
|
gfrd/egfrd
|
samples/bd_propagator/test2.py
|
Python
|
gpl-2.0
| 4,991
|
[
"VTK"
] |
36d0fb4e78594f5f327281371f557ad7eac032ead8c7d38e8cfd639698b959d7
|
#! /usr/bin/env python
"""Unit tests for landlab.io.netcdf module."""
import pytest
from numpy.testing import assert_array_equal
from landlab import RasterModelGrid
from landlab.io import (
MismatchGridDataSizeError,
MismatchGridXYLowerLeft,
MismatchGridXYSpacing,
)
from landlab.io.netcdf import read_netcdf
grid_mapping_keys = [
"grid_mapping_name",
"longitude_of_central_meridian",
"false_easting",
"false_northing",
"latitude_of_projection_origin",
"scale_factor_at_central_meridian",
"long_name",
"longitude_of_prime_meridian",
"semi_major_axis",
"inverse_flattening",
"spatial_ref",
"GeoTransform",
]
def test_read_netcdf(datadir):
grid = read_netcdf(datadir / "test-netcdf4.nc")
assert grid.shape == (4, 3)
    assert (grid.dy, grid.dx) == (1.0, 1.0)
assert list(grid.at_node.keys()) == ["surface__elevation"]
assert_array_equal(
grid.at_node["surface__elevation"],
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0],
)
def test_read_netcdf_64bit(datadir):
grid = read_netcdf(datadir / "test-netcdf3-64bit.nc")
assert grid.shape == (4, 3)
    assert (grid.dy, grid.dx) == (1.0, 1.0)
grid = RasterModelGrid((6, 5), xy_of_lower_left=(-1.0, -1.0))
grid = read_netcdf(datadir / "test-netcdf4.nc", grid=grid, halo=1, nodata_value=-1)
assert_array_equal(
grid.at_node["surface__elevation"].reshape(grid.shape),
[
[-1.0, -1.0, -1.0, -1.0, -1.0],
[-1.0, 0.0, 1.0, 2.0, -1.0],
[-1.0, 3.0, 4.0, 5.0, -1.0],
[-1.0, 6.0, 7.0, 8.0, -1.0],
[-1.0, 9.0, 10.0, 11.0, -1.0],
[-1.0, -1.0, -1.0, -1.0, -1.0],
],
)
def test_read_netcdf4_bad_field_name(datadir):
with pytest.raises(ValueError):
read_netcdf(datadir / "test-netcdf4.nc", name="not_surface__elevation")
def test_read_netcdf3_64bit(datadir):
"""Test read_netcdf for with 64-bit netcdf3 format."""
grid = read_netcdf(datadir / "test-netcdf3-64bit.nc")
assert grid.shape == (4, 3)
def test_read_netcdf4(datadir):
"""Test read_netcdf with netcdf4 format."""
grid = read_netcdf(datadir / "test-netcdf4.nc")
assert grid.shape == (4, 3)
grid = read_netcdf(datadir / "test-netcdf4.nc")
assert grid.shape == (4, 3)
def test_bad_data_size(datadir):
"""Test read_netcdf with netcdf4 format."""
grid = RasterModelGrid((10, 10))
with pytest.raises(MismatchGridDataSizeError):
read_netcdf(datadir / "test-netcdf4.nc", grid=grid)
def test_bad_dx(datadir):
"""Test read_netcdf with netcdf4 format."""
grid = RasterModelGrid((4, 3), xy_spacing=10)
with pytest.raises(MismatchGridXYSpacing):
read_netcdf(datadir / "test-netcdf4.nc", grid=grid)
def test_bad_llc(datadir):
"""Test read_netcdf with netcdf4 format."""
grid = RasterModelGrid((4, 3), xy_of_lower_left=(-1, -2))
with pytest.raises(MismatchGridXYLowerLeft):
read_netcdf(datadir / "test-netcdf4.nc", grid=grid)
|
cmshobe/landlab
|
tests/io/netcdf/test_read_netcdf.py
|
Python
|
mit
| 3,051
|
[
"NetCDF"
] |
c631fd8ad9d1a79b11d8f15d764df36ed757980facf14adc99a06a73fdef6b26
|
import unittest
from octopus.server.orientdb.orientdb_shell_mananger import OrientDBShellManager
class TestOrientDBShellManager(unittest.TestCase):
def testUnreachableServer(self):
self.hostname = 'localhost'
self.port = '1337'
shell_manager = OrientDBShellManager(self.hostname, self.port)
shells = shell_manager.list()
self.assertRaises(ConnectionRefusedError, list, shells)
|
octopus-platform/bjoern
|
python/octopus-tools/tests/orientdb_shell_manager.py
|
Python
|
gpl-3.0
| 425
|
[
"Octopus"
] |
1be933db8cc7ae8b7d4a158f19ea9e3b673d368b5738ec482f5736ce95ba0771
|
# coding=utf-8
from unittest import TestCase
from gputils.gptransformer import GooglePlayParsedResponseTransformer
from json2html import *
class TestTransforming(TestCase):
__PARSED_RESPONSE = '[<div class=\"review-body\"> <span class=\"review-title\">Galaxy S5</span> Paro de funcionar novamente no Estado do Parana <div class=\"review-link\" style=\"display:none\"> <a class=\"id-no-nav play-button tiny\" href=\"#\" target=\"_blank\">Resenha completa</a> </div> </div>]'
__EXPECTED_JSON = u'{\n "reviews": [\n {\n "review": "Galaxy S5 Paro de funcionar novamente no Estado do Parana "\n }\n ]\n}'
__EXPECTED_HTML = u'<table border="1"><tr><th>reviews</th><td><ul><li><table border="1"><tr><th>review</th><td>Galaxy S5 Paro de funcionar novamente no Estado do Parana </td></tr></table></li></ul></td></tr></table>'
def test_can_transform_parsed_content_into_json_formatted_output(self):
"""
Should be able to transform parsed content to a json formatted string
        Hint: if this fails, chances are that Google changed the HTML response structure.
"""
transformer = GooglePlayParsedResponseTransformer(self.__PARSED_RESPONSE)
json = transformer.transform()
self.assertEqual(json, self.__EXPECTED_JSON)
def test_can_output_content_as_html(self):
"""
Should be able to transform a json transformed content into html content
"""
transformer = GooglePlayParsedResponseTransformer(self.__PARSED_RESPONSE)
json = transformer.transform()
json = json2html.convert(json = json)
self.assertEqual(json, self.__EXPECTED_HTML)
|
jcfausto/gpscrapper
|
specs/test_transforming.py
|
Python
|
mit
| 1,686
|
[
"Galaxy"
] |
3066edc496b3b035b6736da87cb53a4d2e916edaef9c44f0adc963cff89a1074
|
from parsimonious.grammar import Grammar
from parsimonious.nodes import NodeVisitor
import textwrap
from string import Template
import re
import sys
class MarkupParser(NodeVisitor):
def __init__(self, text):
grammar = """
main = meta? ( nix / markup )*
newline = ~"[\\n\\s]"+
meta = newline? meta_open meta_content meta_close
meta_open = "{---"
meta_content = ((!meta_close (~".+" / ~"\\n"))+)
meta_close = "---}"
markup = (!nix ( esc_nix_open / ~"\{{3,}" / ~"."s ))+
esc_nix_open = "\\{{"
esc_nix_close = "\\}}"
nix = nix_open nix_content nix_close
nix_open = "{{" !"{"
nix_content = ( nix / nix_expr )*
nix_expr = !nix_open !nix_close (esc_nix_close / ~"."s)
nix_close = "}}"
"""
ast = Grammar(grammar).parse(text)
self.meta = "";
self.markup = "";
self.result = self.visit(ast)
def visit_main(self, node, children):
return "".join(filter(lambda x: x != None, children))
def visit_meta(self, node, children):
self.meta = "".join(filter(lambda x: x != None, children))
return ""
def visit_newline(self, node, children):
return node.text
def visit_meta_open(self, node, children):
return ""
def visit_meta_close(self, node, children):
return ""
def visit_meta_content(self, node, children):
return node.text
def visit_nix(self, node, children):
return "".join(children)
def visit_nix_open(self, node, children):
return "${"
def visit_nix_close(self, node, children):
return "}"
def visit_nix_content(self, node, children):
return "".join(children)
def visit_nix_expr(self, node, children):
return node.text.replace("\}}", "}}")
def visit_markup(self, node, children):
return node.text.replace("''", "'''").replace("\{{", "{{").replace("${", "''${")
def generic_visit(self, node, children):
return "".join(filter(lambda x: x != None, children))
def processNixText (text):
# escaping intro sep
text = text.replace("\>>>", ">>>")
# escaping page sep
text = text.replace("\<<<", "<<<")
return text
def toNix (meta, markup):
intro_match = markup.split("\n>>>\n")
if len(intro_match) > 1:
intro = "intro = ''" + processNixText(intro_match[0]) + "'';"
content = intro_match[1]
else:
intro = "";
content = intro_match[0]
pages_match = content.split("\n<<<\n")
if len(pages_match) > 1:
pages = "pages = [ ''" + "''\n''".join( map(lambda x: processNixText(x), pages_match )) + "'' ];"
content = ""
else:
pages = "";
content = "content = ''" + processNixText(content) + "'';"
template = Template( textwrap.dedent("""
env:
let meta = rec {
$meta
}; in
with env;
({
$meta
$content
$intro
$pages
} // meta)
"""))
return template.safe_substitute(
meta = processNixText(meta)
, intro = intro
, content = content
, pages = pages
)
text = sys.stdin.read()
m = MarkupParser(text)
print (toNix (m.meta, m.result))
|
styx-static/styx
|
src/tools/parser.py
|
Python
|
mit
| 3,364
|
[
"VisIt"
] |
aefffc8a95e38db7491c52d277befe1b08aac7e163f3fa9eaca472c2d2ad99f9
|
import os.path
from subprocess import check_call, CalledProcessError
import shutil
from tempfile import NamedTemporaryFile
import sys
import pysam
from crumbs.bam.flag import create_flag
from crumbs.settings import get_setting
from crumbs.utils.bin_utils import get_num_threads
# pylint: disable=C0111
def filter_bam(in_fpath, out_fpath, min_mapq=0, required_flag_tags=None,
filtering_flag_tags=None, regions=None):
cmd = ['-bh']
    # Concatenating '-o' with the path is a workaround for a pysam quirk;
    # ideally this would be cmd.extend(['-o', out_fpath]). See:
    # https://groups.google.com/forum/#!msg/pysam-user-group/ooHgIiNVe4c/CcY06d45rzQJ
    cmd.append('-o' + out_fpath)
if min_mapq:
cmd.extend(['-q', str(min_mapq)])
if required_flag_tags:
flag = create_flag(required_flag_tags)
cmd.extend(['-f', str(flag)])
if filtering_flag_tags:
flag = create_flag(filtering_flag_tags)
cmd.extend(['-F', str(flag)])
cmd.extend([in_fpath])
if regions:
regions = ['{0}:{1}-{2}'.format(*s) for s in regions.segments]
cmd.extend(regions)
pysam.view(*cmd)
def sort_bam(in_bam_fpath, out_bam_fpath=None):
if out_bam_fpath is None:
out_bam_fpath = in_bam_fpath
if out_bam_fpath == in_bam_fpath:
sorted_fhand = NamedTemporaryFile(suffix='.sorted.bam', delete=False)
temp_out_fpath = sorted_fhand.name
else:
temp_out_fpath = out_bam_fpath
picard_jar = get_setting("PICARD_JAR")
cmd = ['java', '-jar', picard_jar, 'SortSam',
'INPUT={0}'.format(in_bam_fpath),
'OUTPUT={0}'.format(temp_out_fpath),
'SORT_ORDER=coordinate', 'VALIDATION_STRINGENCY=LENIENT']
stderr = NamedTemporaryFile(suffix='picard.stderr')
check_call(cmd, stderr=stderr)
if temp_out_fpath != out_bam_fpath:
shutil.move(temp_out_fpath, out_bam_fpath)
def index_bam(bam_fpath):
'It indexes a bam file'
pysam.index(bam_fpath)
def _create_sam_reference_index(fpath):
'It creates a sam index for a reference sequence file'
index_fpath = fpath + '.fai'
if os.path.exists(index_fpath):
return
pysam.faidx(fpath)
def _create_picard_dict(fpath):
    'It creates a picard dict if it does not exist'
dict_path = os.path.splitext(fpath)[0] + '.dict'
if os.path.exists(dict_path):
return
picard_jar = get_setting("PICARD_JAR")
cmd = ['java', '-jar', picard_jar, 'CreateSequenceDictionary',
'R=%s' % fpath,
'O=%s' % dict_path]
stderr = NamedTemporaryFile(suffix='picard.stderr')
check_call(cmd, stderr=stderr)
def realign_bam(in_bam_fpath, reference_fpath, out_bam_fpath=None):
if out_bam_fpath is None:
out_bam_fpath = in_bam_fpath
if out_bam_fpath == in_bam_fpath:
realigned_fhand = NamedTemporaryFile(suffix='.realigned.bam',
delete=False)
temp_out_fpath = realigned_fhand.name
else:
temp_out_fpath = out_bam_fpath
_realign_bam(in_bam_fpath, reference_fpath, temp_out_fpath, threads=False)
sort_bam(temp_out_fpath)
if temp_out_fpath != out_bam_fpath:
shutil.move(temp_out_fpath, out_bam_fpath)
def _realign_bam(bam_fpath, reference_fpath, out_bam_fpath, threads=False):
'It realigns the bam using GATK Local realignment around indels'
# reference sam index
_create_sam_reference_index(reference_fpath)
# reference picard dict
_create_picard_dict(reference_fpath)
# bam index
index_bam(bam_fpath)
# the intervals to realign
# gatk_dir = get_setting("GATK_DIR")
# gatk_jar = os.path.join(gatk_dir, 'GenomeAnalysisTK.jar')
gatk_jar = get_setting('GATK_JAR')
intervals_fhand = NamedTemporaryFile(suffix='.intervals')
stderr = NamedTemporaryFile(suffix='picard.stderr')
stdout = NamedTemporaryFile(suffix='picard.stdout')
cmd = ['java', '-jar', gatk_jar, '-T', 'RealignerTargetCreator',
'-I', bam_fpath, '-R', reference_fpath, '-o', intervals_fhand.name]
check_call(cmd, stderr=stderr, stdout=stdout)
# the realignment itself
cmd = ['java', '-jar', gatk_jar, '-I', bam_fpath, '-R', reference_fpath,
'-T', 'IndelRealigner', '-targetIntervals', intervals_fhand.name,
'-o', out_bam_fpath]
if threads and threads > 1:
cmd.extend(['-nt', str(get_num_threads(threads))])
check_call(cmd, stderr=stderr, stdout=stdout)
intervals_fhand.close()
def calmd_bam(in_bam_fpath, reference_fpath, out_bam_fpath=None):
if out_bam_fpath is None:
out_bam_fpath = in_bam_fpath
if out_bam_fpath == in_bam_fpath:
realigned_fhand = NamedTemporaryFile(suffix='.realigned.bam',
delete=False)
temp_out_fpath = realigned_fhand.name
else:
temp_out_fpath = out_bam_fpath
_calmd_bam(in_bam_fpath, reference_fpath, temp_out_fpath)
if temp_out_fpath != out_bam_fpath:
shutil.move(temp_out_fpath, out_bam_fpath)
def _calmd_bam(bam_fpath, reference_fpath, out_bam_fpath):
out_fhand = open(out_bam_fpath, 'wb')
for line in pysam.calmd(*["-bAr", bam_fpath, reference_fpath]):
out_fhand.write(line)
# out_fhand.write(pysam.calmd(*["-bAr", bam_fpath, reference_fpath]))
out_fhand.flush()
out_fhand.close()
def merge_sams(in_fpaths, out_fpath):
picard_jar = get_setting("PICARD_JAR")
cmd = ['java', '-jar', picard_jar, 'MergeSamFiles',
'O={}'.format(out_fpath)]
for in_fpath in in_fpaths:
cmd.append('I={}'.format(in_fpath))
stderr = NamedTemporaryFile(suffix='picard.stderr')
stdout = NamedTemporaryFile(suffix='picard.stdout')
try:
check_call(cmd, stderr=stderr, stdout=stdout)
except CalledProcessError:
sys.stderr.write(open(stderr.name).read())
sys.stdout.write(open(stdout.name).read())
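# Typical usage of the helpers above (hypothetical file names):
#   sort_bam('sample.bam')                   # coordinate-sort in place via Picard
#   index_bam('sample.bam')                  # creates sample.bam.bai
#   calmd_bam('sample.bam', 'reference.fa')  # recompute MD tags via samtools calmd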
|
JoseBlanca/seq_crumbs
|
crumbs/bam/bam_tools.py
|
Python
|
gpl-3.0
| 5,962
|
[
"pysam"
] |
4ee14e6698a53c8d7c7acd9e7b91702c5e8ee0fa38d410995462541d3cdbba58
|
"""bayesian_network.py
The purpose of this module is to assist with the construction of Bayesian Networks
designed for modeling the effects of a molecular mechanism. The goal is to learn
about this molecular mechanism by analysing the evidence that we have. We can then
perform a genome-wide association comparing genotype with the combined evidence
of the molecular mechanism. The principal assumption behind this method is that
there is a molecular mechanism whose effects are observed across multiple phenotypes.
Here is an example network:
X1 --> X2 --> X3 --> X6
X2 --> X4 --> X3
X2 --> X5
X1 = Genotype
X2 = Molecular Mechanism
X3,X4,X5,X6 are phenotypes summarizing the disease
Goal: Learn X2 from the information in X3,X4,X5,X6 using Hard Expectation Maximization
Note: PCA will give similar results for Gaussian Networks without interactions
Features
--------
* Supports Linear Gaussian Nodes, Negative Binomial, Sigmoid, and categorical nodes
* Hard EM Algorithm for learning single latent (Currently it is limited to a Bernoulli Random Variable)
Example
-------
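A minimal sketch of intended usage (assumed from the classes below; not a
tested doctest): a Bernoulli "mechanism" node with one Gaussian phenotype.
    import numpy as np
    x2 = BinaryNode("X2")
    x2.set_params(np.float(0.5))               # P(X2 = 1) = 0.5
    x3 = LinearGaussianNode("X3")
    x3.add_parent(x2)
    x3.set_params([0.0, 1.0], np.float(1.0))   # X3 ~ N(0 + 1 * X2, 1)
    net = BayesianNetwork()
    net.set_nodes([x2, x3])
    data = net.forward_sample(100)             # pandas DataFrame of 100 draws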
"""
from scipy.misc import logsumexp
from scipy.stats import bernoulli
from scipy.stats import uniform
from scipy.stats import norm
from scipy.stats import binom
import pandas as pd
import math
import random
import numpy as np
import scipy.stats as stats
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
from helpers import all_true
from helpers import find_max_key_val_in_dict
from helpers import sigmoid
from helpers import logistic
from helpers import p_neg_binom
from helpers import r_neg_binom
from helpers import fit_neg_binom
import sklearn.linear_model as model
from scipy.stats import bernoulli
from statsmodels.discrete.discrete_model import NegativeBinomial as nb_glm
class BayesianNetwork:
def __init__(self):
pass
def set_nodes(self,nodes):
"""
set the nodes for this network
* all nodes have to have a distinct name
* at most 1 latent node is allowed
"""
assert type(nodes) == list
#validate that all are nodes and all names are distinct
num_is_latent = 0
names = {}
latent_node = None
for node in nodes:
assert isinstance(node,_Node)
assert node.name != "prob"
if node.is_latent:
latent_node = node
num_is_latent += 1
assert not names.has_key(node.name), "Duplicate name: {0}".format(node.name)
names[node.name] = 1
assert num_is_latent <= 1, "Only one latent node allowed"
#compute topological sort
self.__dfs__(nodes)
assert self.nodes is not None
self.latent_node = latent_node
self.set_has_latent_descendant()
def set_has_latent_descendant(self):
"""
loop through the parents of the latent node if it exists
mark each parent as having a latent ancestor
mark any ancestors of these parents
"""
assert self.nodes is not None
if self.latent_node is not None:
for parent in self.latent_node.parents:
self.mark_has_latent_descendant(parent)
def mark_has_latent_descendant(self,node):
assert node is not None
node.set_has_latent_descendant()
for parent in node.parents:
self.mark_has_latent_descendant(parent)
def __dfs__(self,nodes):
for node in nodes:
node.tmp_mark = False
node.perm_mark = False
self.nodes = []
for node in nodes:
if not node.tmp_mark and not node.perm_mark:
self.__visit__(node)
def __visit__(self,node):
if node.tmp_mark:
raise Exception("Not a DAG")
if not node.tmp_mark and not node.perm_mark:
node.tmp_mark = True
for child in node.children:
self.__visit__(child)
node.perm_mark = True
node.tmp_mark = False
self.nodes.insert(0,node)
def print_network(self):
assert self.nodes is not None, "Please set the nodes of the network"
for node in self.nodes:
print str(node)
#forward sample
def forward_sample(self, n=1):
assert self.nodes is not None
sample = None
for i in range(0,n):
s = dict()
for node in self.nodes:
par_dict = dict()
for parent in node.parents:
par_dict[parent.name] = [ s[parent.name] ]
s[node.name] = node.simulate(par_dict)
if sample is None:
sample = dict()
for key,value in s.iteritems():
sample[key] = list()
sample[key].append(value)
else:
for key,value in s.iteritems():
sample[key].append(value)
return pd.DataFrame(sample)
#mle learning
def mle(self,data):
assert self.nodes is not None
assert type(data) == pd.DataFrame
for node in self.nodes:
node.mle(data)
#compute joint probability
def joint_prob(self,dict_vals,log=True):
assert self.nodes is not None
log_joint = 0.0
for node in self.nodes:
#print "joint " + str(node)
log_joint += node.prob(dict_vals,log=True)
if log:
return log_joint
else:
return np.exp(log_joint)
def complete_data_log_likelihood(self,data):
assert self.nodes is not None
assert type(data) == pd.DataFrame
llh = 0.0
for index,row in data.iterrows():
llh += self.joint_prob(row.to_dict())
return llh
#perform hard expectation maximization
#only support binary for time being
#will consider mcmc for normal
def hard_em(self,data,max_iter=100,initialization_func=None):
assert self.latent_node is not None
assert self.nodes is not None
assert type(data) == pd.DataFrame
#only allow this algorithm when the latent node has no parents
assert len(self.latent_node.parents) == 0
if initialization_func is not None:
#apply initialization function to the data
pass
#assume data has been completed already as the initialization is application specific
#mle of parameters to get initial parameters
num_iter = 1
self.mle(data)
previous_llh = -np.inf
current_llh = self.complete_data_log_likelihood(data)
llhs = [current_llh]
while True:
print "Iteration: {0}".format(num_iter)
print "Previous LLH: {0}".format(previous_llh)
print "Current LLH: {0}".format(current_llh)
#hard e step
self.__hard_e_step__(data)
#hard m step
self.mle(data)
previous_llh = current_llh
current_llh = self.complete_data_log_likelihood(data)
num_iter += 1
llhs.append(current_llh)
#if our log likelihood does not improve then
if current_llh <= previous_llh:
break
return { "num_iter": num_iter, "llhs": llhs }
def __prob_x_given_others__(self,dict_data,target = None):
"""
Computes the log conditional probability of each value from the target variable given
all other variables.
Return: dictionary [value] --> log prob
* assumes all other variables have been specified
"""
if target is None:
target = self.latent_node
#check that all variables have been specified
for node in self.nodes:
if node is not target:
assert dict_data.has_key(node.name)
old_x = dict_data[target.name]
log_probs = list()
log_joint_prob_dict = dict()
#compute joint probs
for x_val in target.values:
dict_data[target.name] = x_val
log_joint_prob = self.joint_prob(dict_data)
log_probs.append(log_joint_prob)
#print "log_joint_prob: {0}".format(log_joint_prob)
#print "x_val: {0}".format(x_val)
#print "log_joint_prob: {0}".format(log_joint_prob)
log_joint_prob_dict[x_val] = log_joint_prob
#normalize joint probs
normalizer = logsumexp(log_probs)
log_cond_prob = dict()
for x_val,log_joint_prob in log_joint_prob_dict.iteritems():
log_conditional_prob = log_joint_prob - normalizer
log_cond_prob[x_val] = log_conditional_prob
#reset input dictionary
dict_data[target.name] = old_x
return log_cond_prob
def __hard_e_step__(self,data):
"""
loop through each row of the data
compute the prob of each value of latent variable
assign each rows value to be the MAP estimator (value with maximum probability: mode)
"""
assert self.latent_node is not None
assert self.latent_node.num_vals == 2
for index,row in data.iterrows():
log_cond_probs = self.__prob_x_given_others__(row.to_dict())
max_val,max_log_prob = find_max_key_val_in_dict(log_cond_probs)
#assign the row to be the value with the maximum probability
data.set_value(index,self.latent_node.name,max_val)
#row[self.latent_node.name] = max_val
#Note that the underscore makes this class private
class _Node:
"""
All children must provide an implementation for: set_params, mle, simulate, prob
"""
def __init__(self,name):
self.type = "Node"
self.name = name
self.params = None
self.children = []
self.parents = []
self.parent_names = {}
self.children_names = {}
self.limits = []
#Current specification will only allow 1 node to be latent
#Learning will be used ONLY children of this Node
self.is_latent = False
self.has_latent_descendant = False
def set_is_latent(self):
self.is_latent = True
def set_has_latent_descendant(self):
self.has_latent_descendant = True
def set_params(self,params):
assert params is not None
self.params = params
def get_params(self):
return self.params
def is_root(self):
return len(self.parents) == 0
def is_leaf(self):
return len(self.children) == 0
def add_parent(self,parent):
assert isinstance(parent,_Node)
#add the parent to this node's parents
#add this node to the parent's children
self.parents.append(parent)
self.parent_names[parent.name] = None
parent.children.append(self)
parent.children_names[self.name] = None
def add_child(self,child):
assert isinstance(child,_Node)
#add the child to this node's children
#add this node to the childs's parents
self.children.append(child)
self.children_names[child.name] = None
child.parents.append(self)
child.parent_names[self.name] = None
def __str__(self):
return "Name: {0}, is_latent: {1}, Children: {2}".format(self.name, self.is_latent,str(self.children_names.keys()))
def mle(self):
raise Exception("Not supported")
def prob(self,dict_vals,log=True):
#make sure data has been input correctly
assert self.params is not None
assert dict_vals.has_key(self.name)
for parent_name in self.parent_names.keys():
assert dict_vals.has_key(parent_name)
def simulate(self,parent_vals=None):
assert type(parent_vals) is dict
for key,val in parent_vals.iteritems():
assert self.parent_names.has_key(key)
class DiscreteNode(_Node):
def __init__(self,name,values):
assert name is not None
assert type(values) is list
assert len(values) > 1
_Node.__init__(self,name)
# The number of values the discrete variable takes on
self.num_vals = len(values)
self.values = values
def add_parent(self,parent):
assert isinstance(parent,DiscreteNode)
_Node.add_parent(self,parent)
def set_params(self,params_df):
"""params_df are a pandas dataframe
must contain all the parents and itself in columns and an additional column for prob
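        Illustrative CPT for a root node X with values [0, 1] (assumed layout):
            pd.DataFrame({"X": [0, 1], "prob": [0.3, 0.7]})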
"""
assert isinstance(params_df,pd.DataFrame)
assert params_df.shape[1] == 2 + len(self.parents)
assert self.name in params_df.columns
assert "prob" in params_df.columns
num_vals_in_df = self.num_vals
for parent in self.parents:
assert parent.name in params_df.columns
num_vals_in_df *= parent.num_vals
#make sure the number of rows are correct
assert num_vals_in_df == params_df.shape[0]
#make sure that when we sum across the values of self
#that they sum to 1
parent_names = self.parent_names.keys()
if len(parent_names) > 0:
for p in params_df.groupby(parent_names).sum()['prob']:
np.testing.assert_almost_equal(p,1.0)
else:
np.testing.assert_almost_equal(params_df['prob'].sum(),1.0)
_Node.set_params(self,params_df)
assert isinstance(self.params,pd.DataFrame)
def prob(self,dict_vals,log=True):
_Node.prob(self,dict_vals)
#find the matching probability
prob = None
for index,row in self.params.iterrows():
keep = True
value = dict_vals[self.name]
if row[self.name] != value:
keep = False
if keep:
for variable in self.parent_names.keys():
value = dict_vals[variable]
if row[variable] != value:
#print "Row" + str(row)
#print variable
#print value
keep = False
break
if keep:
prob = row["prob"]
#we found a match so break
break
assert prob is not None
if log:
return np.log(prob)
else:
return prob
#TODO: Need to update this routine to account for unobserved events
# this does not work in those cases
def mle(self,data):
assert type(data) is pd.DataFrame
parent_names = self.parent_names.keys()
parent_names_and_self = [self.name]
parent_names_and_self.extend(parent_names)
#verify that the data fram contains the columns that we need
assert len(data.columns.intersection(parent_names_and_self)) == len(parent_names) + 1
#get columns of interest
data_sub = data[parent_names_and_self]
#TODO: check that the ranges match
assert len(parent_names_and_self) > 0
if len(parent_names_and_self) == 1:
vals_count_dict = dict()
list_of_vals_in_df = [x for x in data_sub[self.name]]
#print list_of_vals_in_df
#initialize dictionary
for self_val in self.values:
vals_count_dict[self_val] = 0.0
#count observations
total = 0.0
for val in list_of_vals_in_df:
assert vals_count_dict.has_key(val)
vals_count_dict[val] += 1.0
total += 1.0
#create param_dict
param_dict = dict()
param_dict["prob"] = []
param_dict[self.name] = []
for key,val in vals_count_dict.iteritems():
param_dict["prob"].append(val/total)
param_dict[self.name].append(key)
#create params dataframe
params = pd.DataFrame(param_dict)
self.set_params(params)
else:
#group by the parents
df_parents_groups = data_sub.groupby(parent_names)
params_res = dict()
#initialize
params_res["prob"] = list()
for name in parent_names_and_self:
params_res[name] = list()
for group in df_parents_groups:
parent_vals = group[0]
print "parent_vals: {0}".format(parent_vals)
if len(self.parents) > 1:
assert len(parent_vals) == len(parent_names)
else:
parent_vals = [parent_vals]
g_value = group[1]
counts = g_value.groupby([self.name]).count()[parent_names[0]]
freqs = counts / counts.sum()
#insert child and parent vals into dict results
for self_val in self.values:
self_prob = freqs.get(self_val)
params_res["prob"].append(self_prob)
params_res[self.name].append(self_val)
#insert parent values
for index in range(0,len(parent_names)):
par_name = parent_names[index]
par_val = parent_vals[index]
params_res[par_name].append(par_val)
params = pd.DataFrame(params_res)
###
# Add back missing params
# enumerate all param combos
# check
###
self.set_params(params)
def simulate(self,parents_vals=None):
#print parents_vals
if len(self.parent_names) > 0:
#make sure input is correct
assert type(parents_vals) is dict
for parent_name,val in parents_vals.iteritems():
assert self.parent_names.has_key(parent_name)
assert type(val) is list
assert len(val) == 1
else:
parents_vals = dict()
parents_vals[self.name] = self.values
assert parents_vals is not None
#subset data frame to only include parents (or only itself if is a root node)
df_tmp_1 = self.params[parents_vals.keys()]
#match rows to parent values
df_tmp_2 = df_tmp_1.isin(parents_vals)
#create a logical index for those rows
ind = df_tmp_2.apply(all_true,axis=1)
#subset the parameter data frame to find matching event
matching_events = self.params.loc[ind]
assert matching_events.shape[0] == self.num_vals, "Not enough events specified for {0}".format(str(self.params))
#get the probabilites for those indices
probs = matching_events['prob'].tolist()
#random sample and get the index in the dataframe for that probability
random_index = (np.random.multinomial(1,probs,size=1) == 1).tolist()[0]
#get the value of this variable that that index corresponds to
simulated_val = self.params.loc[ind][self.name].loc[random_index].iloc[0]
return simulated_val
class SigmoidNode(_Node):
# TODO: override set_params, mle, simulate, prob
def __init__(self,name,values):
assert name is not None
assert 0 in values
assert 1 in values
_Node.__init__(self,name)
# The number of values the discrete variable takes on
self.num_vals = len(values)
self.values = values
def set_params(self,params):
"""
params are a list
first param is intercept
all others are betas corresponding to each parent
"""
assert params is not None
assert type(params) is list
assert len(params) == len(self.parents) + 1
_Node.set_params(self,params)
def prob(self,dict_vals,log=True,index=0):
#check that dictionary has all values needed for calculation
_Node.prob(self,dict_vals)
val_of_node = dict_vals[self.name]
        assert val_of_node == 0 or val_of_node == 1
vals = [1]
for par in self.parents:
par_val = dict_vals[par.name]
if type(par_val) is list:
vals.append(par_val[index])
else:
vals.append(par_val)
linear_comb = np.inner(self.params,vals)
p_val_is_1 = sigmoid(linear_comb)
p_val_of_node = None
if val_of_node == 1:
p_val_of_node = p_val_is_1
elif val_of_node == 0:
p_val_of_node = 1 - p_val_is_1
else:
raise("Error: Node: {0}, Value: {1} Not supported".format(self.name,val_of_node))
if log:
return np.log(p_val_of_node)
else:
return p_val_of_node
def simulate(self,parent_vals=None):
#perform assumption checking
_Node.simulate(self,parent_vals)
parent_vals_copy = parent_vals.copy()
parent_vals_copy[self.name] = 1
p_1 = self.prob(parent_vals_copy,log=False)
return bernoulli.rvs(p_1, size=1)[0]
def mle(self,data):
assert type(data) is pd.DataFrame
parent_names = []
for par in self.parents:
assert par.name in data.columns
parent_names.append(par.name)
#get columns of interest
data_sub = data[parent_names].values
response = data[self.name].values
if len(self.parents) > 0:
glm = model.LogisticRegression(C=10 ** 12)
glm.fit(X=data_sub,y=response)
params = []
params.append(glm.intercept_[0])
for beta in glm.coef_[0]:
params.append(beta)
SigmoidNode.set_params(self,params)
else:
X = [ ]
y = [ ]
for v in response.tolist():
X.append([1])
y.append(v)
glm = model.LogisticRegression(fit_intercept=False,C=10 ** 12)
glm.fit(X=X,y=y)
params = []
params.append(np.float(glm.coef_[0]))
#print params
SigmoidNode.set_params(self,params)
class ParentNode():
"""
Class for nodes without children
has some common functions for these types of nodes
"""
def add_parent(self):
raise Exception("{0} cannot have parents".format(type(self)))
def prob_easy(self,val,log=False):
vals_dict = {}
vals_dict[self.name] = val
return self.prob( vals_dict,log)
class BinaryNode(SigmoidNode,ParentNode):
"""
This class is a binary parent node that functions similarly to discrete node
but it uses a sigmoid node for its implementation
"""
def __init__(self,name):
SigmoidNode.__init__(self,name,[0,1])
def set_params(self,p):
assert type(p) is np.float
        #inverse of the sigmoid (the logit): sigmoid(intercept) == p
intercept = - np.log(1/p - 1)
SigmoidNode.set_params(self,[intercept])
class LinearGaussianNode(_Node):
def __init__(self,name):
_Node.__init__(self,name)
def set_tolerance(self,tol):
assert tol is not None
assert type(tol) is np.float
self.tol = tol
def set_params(self,params,std_dev):
"""
params are a list
first param is intercept
all others are betas corresponding to each parent
std_dev is a separate parameter for the normal distribution
"""
assert params is not None
assert std_dev is not None
assert type(params) is list
assert len(params) == len(self.parents) + 1
_Node.set_params(self,params)
self.std_dev = np.float(std_dev)
def get_params(self):
return (self.params,self.std_dev)
def prob(self,dict_vals,log=True,index=0):
#check that dictionary has all values needed for calculation
_Node.prob(self,dict_vals)
val_of_node = dict_vals[self.name]
vals = [1]
for par in self.parents:
par_val = dict_vals[par.name]
if type(par_val) is list:
vals.append(par_val[index])
else:
vals.append(par_val)
#mean
linear_comb = np.inner(self.params,vals)
"""
        normal distribution density function can give values greater than 1 if the standard deviation is less than 1/sqrt(2 * pi)
        we can address this in three ways
        (1) Compute the probability of a particular value x as normal_cdf(x + tolerance) - normal_cdf(x - tolerance). (Initially I was thinking of using a tolerance of std_dev/2) #https://en.wikipedia.org/wiki/List_of_logarithmic_identities
        (2) Standardize the input variables so that the distribution follows a standard normal.
        (3) Throw an error if the standard deviation of a normal distribution is less than 1/sqrt(2 * pi)
        After talking with William this really should not even be an issue
"""
log_p_val_of_node = norm.logpdf(val_of_node,loc=linear_comb,scale=self.std_dev)
if log:
return log_p_val_of_node
else:
return np.exp(log_p_val_of_node)
def simulate(self,parent_vals=None,index=0):
#perform assumption checking
_Node.simulate(self,parent_vals)
parent_vals_copy = parent_vals.copy()
#collect values
vals = [1]
for par in self.parents:
par_val = parent_vals[par.name]
if type(par_val) is list:
vals.append(par_val[index])
else:
vals.append(par_val)
#mean
linear_comb = np.inner(self.params,vals)
x = norm.rvs(loc=linear_comb,scale=self.std_dev,size=1)
return x
def mle(self,data):
assert type(data) is pd.DataFrame
parent_names = []
for par in self.parents:
assert par.name in data.columns
parent_names.append(par.name)
#get columns of interest
data_sub = data[parent_names].values
response = data[self.name].values
#number of predictors + 1
p = len(self.parents) + 1
def std_dev_est(fit_model,X,y):
sigma_2 = np.sum((fit_model.predict(X) - y) ** 2) / float(len(y) - p)
return np.sqrt(sigma_2)
if len(self.parents) > 0:
glm = model.LinearRegression()
glm.fit(X=data_sub,y=response)
params = []
params.append(glm.intercept_)
for beta in glm.coef_:
params.append(beta)
LinearGaussianNode.set_params(self,params,std_dev_est(glm,data_sub,response))
else:
X = [ ]
y = [ ]
for v in response.tolist():
X.append([1])
y.append(v)
glm = model.LinearRegression(fit_intercept=False)
glm.fit(X=X,y=y)
params = []
params.append(np.float(glm.coef_))
#print params
LinearGaussianNode.set_params(self,params,std_dev_est(glm,X,y))
class GaussianNode(LinearGaussianNode,ParentNode):
"""
GaussianNode that serves as a normal distribution parent node
it uses a LinearGaussianNode as its implementation
"""
def __init__(self,name):
LinearGaussianNode.__init__(self,name)
def set_params(self,mean,std_dev):
assert type(mean) is np.float
assert type(std_dev) is np.float
LinearGaussianNode.set_params(self,[mean],std_dev)
class NegativeBinomialNode(_Node):
def __init__(self,name):
_Node.__init__(self,name)
def set_params(self,params,alpha):
"""
params are a list
first param is intercept
all others are betas corresponding to each parent
alpha is the dispersion parameter
"""
assert params is not None
assert alpha is not None
assert type(params) is list
assert len(params) == len(self.parents) + 1
_Node.set_params(self,params)
self.alpha = np.float(alpha)
def prob(self,dict_vals,log=True,index=0):
#check that dictionary has all values needed for calculation
_Node.prob(self,dict_vals)
val_of_node = dict_vals[self.name]
vals = [1]
for par in self.parents:
par_val = dict_vals[par.name]
if type(par_val) is list:
vals.append(par_val[index])
else:
vals.append(par_val)
#mean
linear_comb = np.inner(self.params,vals)
mean = np.exp(linear_comb)
log_p_val_of_node = p_neg_binom(val_of_node,alpha=self.alpha,mean=mean,log=True)
if log:
return log_p_val_of_node
else:
return np.exp(log_p_val_of_node)
def simulate(self,parent_vals=None,index=0):
#perform assumption checking
_Node.simulate(self,parent_vals)
parent_vals_copy = parent_vals.copy()
#collect values
vals = [1]
for par in self.parents:
par_val = parent_vals[par.name]
if type(par_val) is list:
vals.append(par_val[index])
else:
vals.append(par_val)
#mean
linear_comb = np.inner(self.params,vals)
mean = np.exp(linear_comb)
x = r_neg_binom(alpha=self.alpha,mean=mean,num=1)
return x
def mle(self,data):
assert type(data) is pd.DataFrame
parent_names = []
for par in self.parents:
assert par.name in data.columns
parent_names.append(par.name)
#get columns of interest
#column order should match parental order
data_sub = data[parent_names].values
response = data[self.name].values.tolist()
if len(self.parents) > 0:
#fit neg binomial model
X = [ ]
y = [ ]
for i in range(0,len(response)):
x = data_sub[i]
x_temp = [1]
x_temp.extend(x)
yval = np.int(response[i])
X.append(x_temp)
y.append(yval)
print y[i]
print X[i]
print len(y)
print len(X)
res = fit_neg_binom(y,X)
params = []
params.extend(res["params"])
alpha = np.float(res["alpha"])
NegativeBinomialNode.set_params(self,params,alpha)
else:
X = [ ]
y = [ ]
for v in response:
X.append([1])
y.append(v)
#fit neg binomial model
res = fit_neg_binom(y,X)
params = []
params.extend(res["params"])
alpha = np.float(res["alpha"])
NegativeBinomialNode.set_params(self,params,alpha)
|
christopher-gillies/MultiplePhenotypeAssociationBayesianNetwork
|
mpabn/bayesian_network.py
|
Python
|
mit
| 25,945
|
[
"Gaussian"
] |
74aa8fa3981d468e9a7027979f0b5291a7ed037d74913818032334e1c228bd9b
|
__author__ = 'Jason Piper'
import imp
current_version = imp.load_source('pyDNaseVersion', 'pyDNase/_version.py').__version__
#Unfortunately, we have to ensure that the user has numpy installed,
#as pip is bad at installing numpy and matplotlib at the same time, and just breaks
try:
import numpy
except ImportError:
raise ImportError("Due to a quirk with pip, pyDNase requires numpy to be installed before starting setup")
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup
from distutils.extension import Extension
setup(
name='pyDNase',
version=current_version,
description='DNase-seq analysis library',
long_description=open('README.rst',"rt").read(),
author='Jason Piper',
author_email='j.piper@warwick.ac.uk',
url='http://jpiper.github.io/pyDNase',
license='GPLv3',
ext_modules = [Extension("pyDNase.footprinting.fastbinom", ["pyDNase/footprinting/fastbinom.c"])],
packages= [
'pyDNase',
'pyDNase.footprinting',
],
install_requires=[
# Note - not enforcing versions for numpy and matplotlib
# Only basic functionality is used and the installation of these libraries can be a pain
"numpy", #Tested on >=1.5.0
"matplotlib", #Tested on >=1.2
"pysam >= 0.7.5",
"clint >= 0.3.2",
],
package_data = {'pyDNase':["data/*"]},
scripts=[
"pyDNase/scripts/dnase_average_profile.py",
"pyDNase/scripts/dnase_to_javatreeview.py",
"pyDNase/scripts/dnase_wig_tracks.py",
"pyDNase/scripts/wellington_footprints.py",
"pyDNase/scripts/examples/example_footprint_scores.py",
"pyDNase/scripts/dnase_to_JSON.py"],
test_suite="test",
)
|
simonvh/pyDNase
|
setup.py
|
Python
|
gpl-3.0
| 1,788
|
[
"pysam"
] |
c80ecf14289201f7b879fbcc9b967ce1b3ad569eb48ceb4009f2bb080e3439e4
|
#!/usr/bin/env python
"""
Get the currently defined user data volume quotas
Usage:
dirac-dms-user-quota [options]
Example:
$ dirac-dms-user-quota
Current quota found to be 0.0 GB
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
@DIRACScript()
def main():
Script.parseCommandLine(ignoreErrors=False)
import DIRAC
from DIRAC import gLogger, gConfig
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
res = getProxyInfo(False, False)
if not res['OK']:
gLogger.error("Failed to get client proxy information.", res['Message'])
DIRAC.exit(2)
proxyInfo = res['Value']
username = proxyInfo['username']
try:
quota = gConfig.getValue('/Registry/DefaultStorageQuota', 0.)
quota = gConfig.getValue('/Registry/Users/%s/Quota' % username, quota)
gLogger.notice('Current quota found to be %.1f GB' % quota)
DIRAC.exit(0)
except Exception as x:
gLogger.exception("Failed to convert retrieved quota", '', x)
DIRAC.exit(-1)
if __name__ == "__main__":
main()
|
yujikato/DIRAC
|
src/DIRAC/DataManagementSystem/scripts/dirac_dms_user_quota.py
|
Python
|
gpl-3.0
| 1,190
|
[
"DIRAC"
] |
58aa1590271912b794bc46d2a67aded5f69d480336c0a812914e71ebc2a18331
|
# Copyright 2000-2002 Andrew Dalke.
# Copyright 2002-2004 Brad Chapman.
# Copyright 2006-2010 by Peter Cock.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Represent a Sequence Record, a sequence with annotation."""
from Bio._py3k import basestring
__docformat__ = "restructuredtext en" # Simple markup to show doctests nicely
# NEEDS TO BE SYNCH WITH THE REST OF BIOPYTHON AND BIOPERL
# In particular, the SeqRecord and BioSQL.BioSeq.DBSeqRecord classes
# need to be in sync (this is the BioSQL "Database SeqRecord", see
# also BioSQL.BioSeq.DBSeq which is the "Database Seq" class)
class _RestrictedDict(dict):
"""Dict which only allows sequences of given length as values (PRIVATE).
This simple subclass of the Python dictionary is used in the SeqRecord
object for holding per-letter-annotations. This class is intended to
prevent simple errors by only allowing python sequences (e.g. lists,
strings and tuples) to be stored, and only if their length matches that
expected (the length of the SeqRecord's seq object). It cannot however
prevent the entries being edited in situ (for example appending entries
to a list).
>>> x = _RestrictedDict(5)
>>> x["test"] = "hello"
>>> x
{'test': 'hello'}
Adding entries which don't have the expected length are blocked:
>>> x["test"] = "hello world"
Traceback (most recent call last):
...
TypeError: We only allow python sequences (lists, tuples or strings) of length 5.
The expected length is stored as a private attribute,
>>> x._length
5
In order that the SeqRecord (and other objects using this class) can be
pickled, for example for use in the multiprocessing library, we need to
be able to pickle the restricted dictionary objects.
Using the default protocol, which is 0 on Python 2.x,
>>> import pickle
>>> y = pickle.loads(pickle.dumps(x))
>>> y
{'test': 'hello'}
>>> y._length
5
Using the highest protocol, which is 2 on Python 2.x,
>>> import pickle
>>> z = pickle.loads(pickle.dumps(x, pickle.HIGHEST_PROTOCOL))
>>> z
{'test': 'hello'}
>>> z._length
5
"""
def __init__(self, length):
"""Create an EMPTY restricted dictionary."""
dict.__init__(self)
self._length = int(length)
def __setitem__(self, key, value):
# The check hasattr(self, "_length") is to cope with pickle protocol 2
# I couldn't seem to avoid this with __getstate__ and __setstate__
if not hasattr(value, "__len__") or not hasattr(value, "__getitem__") \
or (hasattr(self, "_length") and len(value) != self._length):
raise TypeError("We only allow python sequences (lists, tuples or "
"strings) of length {0}.".format(self._length))
dict.__setitem__(self, key, value)
def update(self, new_dict):
# Force this to go via our strict __setitem__ method
for (key, value) in new_dict.items():
self[key] = value
class SeqRecord(object):
"""A SeqRecord object holds a sequence and information about it.
Main attributes:
- id - Identifier such as a locus tag (string)
- seq - The sequence itself (Seq object or similar)
Additional attributes:
- name - Sequence name, e.g. gene name (string)
- description - Additional text (string)
- dbxrefs - List of database cross references (list of strings)
- features - Any (sub)features defined (list of SeqFeature objects)
- annotations - Further information about the whole sequence (dictionary).
Most entries are strings, or lists of strings.
- letter_annotations - Per letter/symbol annotation (restricted
dictionary). This holds Python sequences (lists, strings
or tuples) whose length matches that of the sequence.
A typical use would be to hold a list of integers
representing sequencing quality scores, or a string
representing the secondary structure.
You will typically use Bio.SeqIO to read in sequences from files as
SeqRecord objects. However, you may want to create your own SeqRecord
objects directly (see the __init__ method for further details):
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> from Bio.Alphabet import IUPAC
>>> record = SeqRecord(Seq("MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF",
... IUPAC.protein),
... id="YP_025292.1", name="HokC",
... description="toxic membrane protein")
>>> print(record)
ID: YP_025292.1
Name: HokC
Description: toxic membrane protein
Number of features: 0
Seq('MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF', IUPACProtein())
If you want to save SeqRecord objects to a sequence file, use Bio.SeqIO
for this. For the special case where you want the SeqRecord turned into
a string in a particular file format there is a format method which uses
Bio.SeqIO internally:
>>> print(record.format("fasta"))
>YP_025292.1 toxic membrane protein
MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF
<BLANKLINE>
You can also do things like slicing a SeqRecord, checking its length, etc
>>> len(record)
44
>>> edited = record[:10] + record[11:]
>>> print(edited.seq)
MKQHKAMIVAIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF
>>> print(record.seq)
MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF
"""
def __init__(self, seq, id="<unknown id>", name="<unknown name>",
description="<unknown description>", dbxrefs=None,
features=None, annotations=None,
letter_annotations=None):
"""Create a SeqRecord.
Arguments:
- seq - Sequence, required (Seq, MutableSeq or UnknownSeq)
- id - Sequence identifier, recommended (string)
- name - Sequence name, optional (string)
- description - Sequence description, optional (string)
- dbxrefs - Database cross references, optional (list of strings)
- features - Any (sub)features, optional (list of SeqFeature objects)
- annotations - Dictionary of annotations for the whole sequence
- letter_annotations - Dictionary of per-letter-annotations, values
should be strings, list or tuples of the same
length as the full sequence.
You will typically use Bio.SeqIO to read in sequences from files as
SeqRecord objects. However, you may want to create your own SeqRecord
objects directly.
Note that while an id is optional, we strongly recommend you supply a
unique id string for each record. This is especially important
if you wish to write your sequences to a file.
If you don't have the actual sequence, but you do know its length,
then using the UnknownSeq object from Bio.Seq is appropriate.
You can create a 'blank' SeqRecord object, and then populate the
attributes later.
"""
if id is not None and not isinstance(id, basestring):
# Lots of existing code uses id=None... this may be a bad idea.
raise TypeError("id argument should be a string")
if not isinstance(name, basestring):
raise TypeError("name argument should be a string")
if not isinstance(description, basestring):
raise TypeError("description argument should be a string")
self._seq = seq
self.id = id
self.name = name
self.description = description
# database cross references (for the whole sequence)
if dbxrefs is None:
dbxrefs = []
elif not isinstance(dbxrefs, list):
raise TypeError("dbxrefs argument should be a list (of strings)")
self.dbxrefs = dbxrefs
# annotations about the whole sequence
if annotations is None:
annotations = {}
elif not isinstance(annotations, dict):
raise TypeError("annotations argument should be a dict")
self.annotations = annotations
if letter_annotations is None:
# annotations about each letter in the sequence
if seq is None:
# Should we allow this and use a normal unrestricted dict?
self._per_letter_annotations = _RestrictedDict(length=0)
else:
try:
self._per_letter_annotations = \
_RestrictedDict(length=len(seq))
                except TypeError:
raise TypeError("seq argument should be a Seq object or similar")
else:
# This will be handled via the property set function, which will
# turn this into a _RestrictedDict and thus ensure all the values
# in the dict are the right length
self.letter_annotations = letter_annotations
# annotations about parts of the sequence
if features is None:
features = []
elif not isinstance(features, list):
raise TypeError("features argument should be a list (of SeqFeature objects)")
self.features = features
# TODO - Just make this a read only property?
def _set_per_letter_annotations(self, value):
if not isinstance(value, dict):
raise TypeError("The per-letter-annotations should be a "
"(restricted) dictionary.")
# Turn this into a restricted-dictionary (and check the entries)
try:
self._per_letter_annotations = _RestrictedDict(length=len(self.seq))
except AttributeError:
# e.g. seq is None
self._per_letter_annotations = _RestrictedDict(length=0)
self._per_letter_annotations.update(value)
letter_annotations = property(
fget=lambda self: self._per_letter_annotations,
fset=_set_per_letter_annotations,
doc="""Dictionary of per-letter-annotation for the sequence.
For example, this can hold quality scores used in FASTQ or QUAL files.
Consider this example using Bio.SeqIO to read in an example Solexa
variant FASTQ file as a SeqRecord:
>>> from Bio import SeqIO
>>> record = SeqIO.read("Quality/solexa_faked.fastq", "fastq-solexa")
>>> print("%s %s" % (record.id, record.seq))
slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
>>> print(list(record.letter_annotations))
['solexa_quality']
>>> print(record.letter_annotations["solexa_quality"])
[40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, -1, -2, -3, -4, -5]
The letter_annotations get sliced automatically if you slice the
parent SeqRecord, for example taking the last ten bases:
>>> sub_record = record[-10:]
>>> print("%s %s" % (sub_record.id, sub_record.seq))
slxa_0001_1_0001_01 ACGTNNNNNN
>>> print(sub_record.letter_annotations["solexa_quality"])
[4, 3, 2, 1, 0, -1, -2, -3, -4, -5]
Any python sequence (i.e. list, tuple or string) can be recorded in
the SeqRecord's letter_annotations dictionary as long as the length
matches that of the SeqRecord's sequence. e.g.
>>> len(sub_record.letter_annotations)
1
>>> sub_record.letter_annotations["dummy"] = "abcdefghij"
>>> len(sub_record.letter_annotations)
2
You can delete entries from the letter_annotations dictionary as usual:
>>> del sub_record.letter_annotations["solexa_quality"]
>>> sub_record.letter_annotations
{'dummy': 'abcdefghij'}
You can completely clear the dictionary easily as follows:
>>> sub_record.letter_annotations = {}
>>> sub_record.letter_annotations
{}
""")
def _set_seq(self, value):
# TODO - Add a deprecation warning that the seq should be write only?
if self._per_letter_annotations:
# TODO - Make this a warning? Silently empty the dictionary?
raise ValueError("You must empty the letter annotations first!")
self._seq = value
try:
self._per_letter_annotations = _RestrictedDict(length=len(self.seq))
except AttributeError:
# e.g. seq is None
self._per_letter_annotations = _RestrictedDict(length=0)
seq = property(fget=lambda self: self._seq,
fset=_set_seq,
doc="The sequence itself, as a Seq or MutableSeq object.")
def __getitem__(self, index):
"""Returns a sub-sequence or an individual letter.
Slicing, e.g. my_record[5:10], returns a new SeqRecord for
that sub-sequence with appropriate annotation preserved. The
name, id and description are kept.
Any per-letter-annotations are sliced to match the requested
sub-sequence. Unless a stride is used, all those features
which fall fully within the subsequence are included (with
their locations adjusted accordingly).
However, the annotations dictionary and the dbxrefs list are
not used for the new SeqRecord, as in general they may not
apply to the subsequence. If you want to preserve them, you
must explicitly copy them to the new SeqRecord yourself.
Using an integer index, e.g. my_record[5] is shorthand for
extracting that letter from the sequence, my_record.seq[5].
For example, consider this short protein and its secondary
structure as encoded by the PDB (e.g. H for alpha helices),
plus a simple feature for its histidine self phosphorylation
site:
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> from Bio.SeqFeature import SeqFeature, FeatureLocation
>>> from Bio.Alphabet import IUPAC
>>> rec = SeqRecord(Seq("MAAGVKQLADDRTLLMAGVSHDLRTPLTRIRLAT"
... "EMMSEQDGYLAESINKDIEECNAIIEQFIDYLR",
... IUPAC.protein),
... id="1JOY", name="EnvZ",
... description="Homodimeric domain of EnvZ from E. coli")
>>> rec.letter_annotations["secondary_structure"] = " S SSSSSSHHHHHTTTHHHHHHHHHHHHHHHHHHHHHHTHHHHHHHHHHHHHHHHHHHHHTT "
>>> rec.features.append(SeqFeature(FeatureLocation(20, 21),
... type = "Site"))
Now let's have a quick look at the full record,
>>> print(rec)
ID: 1JOY
Name: EnvZ
Description: Homodimeric domain of EnvZ from E. coli
Number of features: 1
Per letter annotation for: secondary_structure
Seq('MAAGVKQLADDRTLLMAGVSHDLRTPLTRIRLATEMMSEQDGYLAESINKDIEE...YLR', IUPACProtein())
>>> print(rec.letter_annotations["secondary_structure"])
S SSSSSSHHHHHTTTHHHHHHHHHHHHHHHHHHHHHHTHHHHHHHHHHHHHHHHHHHHHTT
>>> print(rec.features[0].location)
[20:21]
Now let's take a sub sequence, here chosen as the first (fractured)
alpha helix which includes the histidine phosphorylation site:
>>> sub = rec[11:41]
>>> print(sub)
ID: 1JOY
Name: EnvZ
Description: Homodimeric domain of EnvZ from E. coli
Number of features: 1
Per letter annotation for: secondary_structure
Seq('RTLLMAGVSHDLRTPLTRIRLATEMMSEQD', IUPACProtein())
>>> print(sub.letter_annotations["secondary_structure"])
HHHHHTTTHHHHHHHHHHHHHHHHHHHHHH
>>> print(sub.features[0].location)
[9:10]
You can also of course omit the start or end values, for
example to get the first ten letters only:
>>> print(rec[:10])
ID: 1JOY
Name: EnvZ
Description: Homodimeric domain of EnvZ from E. coli
Number of features: 0
Per letter annotation for: secondary_structure
Seq('MAAGVKQLAD', IUPACProtein())
Or for the last ten letters:
>>> print(rec[-10:])
ID: 1JOY
Name: EnvZ
Description: Homodimeric domain of EnvZ from E. coli
Number of features: 0
Per letter annotation for: secondary_structure
Seq('IIEQFIDYLR', IUPACProtein())
If you omit both, then you get a copy of the original record (although
lacking the annotations and dbxrefs):
>>> print(rec[:])
ID: 1JOY
Name: EnvZ
Description: Homodimeric domain of EnvZ from E. coli
Number of features: 1
Per letter annotation for: secondary_structure
Seq('MAAGVKQLADDRTLLMAGVSHDLRTPLTRIRLATEMMSEQDGYLAESINKDIEE...YLR', IUPACProtein())
Finally, indexing with a simple integer is shorthand for pulling out
that letter from the sequence directly:
>>> rec[5]
'K'
>>> rec.seq[5]
'K'
"""
if isinstance(index, int):
# NOTE - The sequence level annotation like the id, name, etc
# do not really apply to a single character. However, should
# we try and expose any per-letter-annotation here? If so how?
return self.seq[index]
elif isinstance(index, slice):
if self.seq is None:
raise ValueError("If the sequence is None, we cannot slice it.")
parent_length = len(self)
answer = self.__class__(self.seq[index],
id=self.id,
name=self.name,
description=self.description)
# TODO - The description may no longer apply.
# It would be safer to change it to something
# generic like "edited" or the default value.
# Don't copy the annotation dict and dbxefs list,
# they may not apply to a subsequence.
# answer.annotations = dict(self.annotations.items())
# answer.dbxrefs = self.dbxrefs[:]
# TODO - Review this in light of adding SeqRecord objects?
# TODO - Cope with strides by generating ambiguous locations?
start, stop, step = index.indices(parent_length)
if step == 1:
# Select relevant features, add them with shifted locations
# assert str(self.seq)[index] == str(self.seq)[start:stop]
for f in self.features:
if f.ref or f.ref_db:
# TODO - Implement this (with lots of tests)?
import warnings
warnings.warn("When slicing SeqRecord objects, any "
"SeqFeature referencing other sequences (e.g. "
"from segmented GenBank records) are ignored.")
continue
if start <= f.location.nofuzzy_start \
and f.location.nofuzzy_end <= stop:
answer.features.append(f._shift(-start))
# Slice all the values to match the sliced sequence
# (this should also work with strides, even negative strides):
for key, value in self.letter_annotations.items():
answer._per_letter_annotations[key] = value[index]
return answer
raise ValueError("Invalid index")
def __iter__(self):
"""Iterate over the letters in the sequence.
For example, using Bio.SeqIO to read in a protein FASTA file:
>>> from Bio import SeqIO
>>> record = SeqIO.read("Fasta/loveliesbleeding.pro", "fasta")
>>> for amino in record:
... print(amino)
... if amino == "L": break
X
A
G
L
>>> print(record.seq[3])
L
This is just a shortcut for iterating over the sequence directly:
>>> for amino in record.seq:
... print(amino)
... if amino == "L": break
X
A
G
L
>>> print(record.seq[3])
L
Note that this does not facilitate iteration together with any
per-letter-annotation. However, you can achieve that using the
python zip function on the record (or its sequence) and the relevant
per-letter-annotation:
>>> from Bio import SeqIO
>>> rec = SeqIO.read("Quality/solexa_faked.fastq", "fastq-solexa")
>>> print("%s %s" % (rec.id, rec.seq))
slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
>>> print(list(rec.letter_annotations))
['solexa_quality']
>>> for nuc, qual in zip(rec, rec.letter_annotations["solexa_quality"]):
... if qual > 35:
... print("%s %i" % (nuc, qual))
A 40
C 39
G 38
T 37
A 36
You may agree that using zip(rec.seq, ...) is more explicit than using
zip(rec, ...) as shown above.
"""
return iter(self.seq)
def __contains__(self, char):
"""Implements the 'in' keyword, searches the sequence.
e.g.
>>> from Bio import SeqIO
>>> record = SeqIO.read("Fasta/sweetpea.nu", "fasta")
>>> "GAATTC" in record
False
>>> "AAA" in record
True
This essentially acts as a proxy for using "in" on the sequence:
>>> "GAATTC" in record.seq
False
>>> "AAA" in record.seq
True
Note that you can also use Seq objects as the query,
>>> from Bio.Seq import Seq
>>> from Bio.Alphabet import generic_dna
>>> Seq("AAA") in record
True
>>> Seq("AAA", generic_dna) in record
True
See also the Seq object's __contains__ method.
"""
return char in self.seq
def __str__(self):
"""A human readable summary of the record and its annotation (string).
        The Python built-in function str works by calling the object's __str__
        method, e.g.
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> from Bio.Alphabet import IUPAC
>>> record = SeqRecord(Seq("MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF",
... IUPAC.protein),
... id="YP_025292.1", name="HokC",
... description="toxic membrane protein, small")
>>> print(str(record))
ID: YP_025292.1
Name: HokC
Description: toxic membrane protein, small
Number of features: 0
Seq('MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF', IUPACProtein())
        In this example you don't actually need to call str explicitly, as the
        print function does this automatically:
>>> print(record)
ID: YP_025292.1
Name: HokC
Description: toxic membrane protein, small
Number of features: 0
Seq('MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF', IUPACProtein())
Note that long sequences are shown truncated.
"""
lines = []
if self.id:
lines.append("ID: {0}".format(self.id))
if self.name:
lines.append("Name: {0}".format(self.name))
if self.description:
lines.append("Description: {0}".format(self.description))
if self.dbxrefs:
lines.append("Database cross-references: " + ", ".join(self.dbxrefs))
lines.append("Number of features: {0}".format(len(self.features)))
for a in self.annotations:
lines.append("/{0}={1}".format(a, str(self.annotations[a])))
if self.letter_annotations:
lines.append("Per letter annotation for: " + ", ".join(self.letter_annotations))
# Don't want to include the entire sequence,
# and showing the alphabet is useful:
lines.append(repr(self.seq))
return "\n".join(lines)
def __repr__(self):
"""A concise summary of the record for debugging (string).
        The Python built-in function repr works by calling the object's __repr__
        method, e.g.
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> from Bio.Alphabet import generic_protein
>>> rec = SeqRecord(Seq("MASRGVNKVILVGNLGQDPEVRYMPNGGAVANITLATSESWRDKAT"
... +"GEMKEQTEWHRVVLFGKLAEVASEYLRKGSQVYIEGQLRTRKWTDQ"
... +"SGQDRYTTEVVVNVGGTMQMLGGRQGGGAPAGGNIGGGQPQGGWGQ"
... +"PQQPQGGNQFSGGAQSRPQQSAPAAPSNEPPMDFDDDIPF",
... generic_protein),
... id="NP_418483.1", name="b4059",
... description="ssDNA-binding protein",
... dbxrefs=["ASAP:13298", "GI:16131885", "GeneID:948570"])
>>> print(repr(rec))
SeqRecord(seq=Seq('MASRGVNKVILVGNLGQDPEVRYMPNGGAVANITLATSESWRDKATGEMKEQTE...IPF', ProteinAlphabet()), id='NP_418483.1', name='b4059', description='ssDNA-binding protein', dbxrefs=['ASAP:13298', 'GI:16131885', 'GeneID:948570'])
At the python prompt you can also use this shorthand:
>>> rec
SeqRecord(seq=Seq('MASRGVNKVILVGNLGQDPEVRYMPNGGAVANITLATSESWRDKATGEMKEQTE...IPF', ProteinAlphabet()), id='NP_418483.1', name='b4059', description='ssDNA-binding protein', dbxrefs=['ASAP:13298', 'GI:16131885', 'GeneID:948570'])
Note that long sequences are shown truncated. Also note that any
annotations, letter_annotations and features are not shown (as they
would lead to a very long string).
"""
return "{0}(seq={1!r}, id={2!r}, name={3!r}, description={4!r}, dbxrefs={5!r})".format(
self.__class__.__name__,
self.seq, self.id, self.name,
self.description, self.dbxrefs)
def format(self, format):
r"""Returns the record as a string in the specified file format.
The format should be a lower case string supported as an output
format by Bio.SeqIO, which is used to turn the SeqRecord into a
string. e.g.
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> from Bio.Alphabet import IUPAC
>>> record = SeqRecord(Seq("MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF",
... IUPAC.protein),
... id="YP_025292.1", name="HokC",
... description="toxic membrane protein")
>>> record.format("fasta")
'>YP_025292.1 toxic membrane protein\nMKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF\n'
>>> print(record.format("fasta"))
>YP_025292.1 toxic membrane protein
MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF
<BLANKLINE>
        The Python print function automatically appends a new line, meaning
in this example a blank line is shown. If you look at the string
representation you can see there is a trailing new line (shown as
slash n) which is important when writing to a file or if
concatenating multiple sequence strings together.
Note that this method will NOT work on every possible file format
supported by Bio.SeqIO (e.g. some are for multiple sequences only).
"""
# See also the __format__ added for Python 2.6 / 3.0, PEP 3101
# See also the Bio.Align.Generic.Alignment class and its format()
return self.__format__(format)
def __format__(self, format_spec):
"""Returns the record as a string in the specified file format.
This method supports the python format() function added in
Python 2.6/3.0. The format_spec should be a lower case string
supported by Bio.SeqIO as an output file format. See also the
SeqRecord's format() method.
Under Python 3 please note that for binary formats a bytes
string is returned, otherwise a (unicode) string is returned.
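        As a minimal sketch of the equivalence with the format method above:
        >>> from Bio.Seq import Seq
        >>> from Bio.SeqRecord import SeqRecord
        >>> record = SeqRecord(Seq("ACGT"), id="demo", description="example")
        >>> format(record, "fasta") == record.format("fasta")
        True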
"""
if not format_spec:
# Follow python convention and default to using __str__
return str(self)
from Bio import SeqIO
if format_spec in SeqIO._BinaryFormats:
# Return bytes on Python 3
from io import BytesIO
handle = BytesIO()
else:
from Bio._py3k import StringIO
handle = StringIO()
SeqIO.write(self, handle, format_spec)
return handle.getvalue()
def __len__(self):
"""Returns the length of the sequence.
For example, using Bio.SeqIO to read in a FASTA nucleotide file:
>>> from Bio import SeqIO
>>> record = SeqIO.read("Fasta/sweetpea.nu", "fasta")
>>> len(record)
309
>>> len(record.seq)
309
"""
return len(self.seq)
# Python 3:
def __bool__(self):
"""Boolean value of an instance of this class (True).
This behaviour is for backwards compatibility, since until the
__len__ method was added, a SeqRecord always evaluated as True.
Note that in comparison, a Seq object will evaluate to False if it
has a zero length sequence.
WARNING: The SeqRecord may in future evaluate to False when its
sequence is of zero length (in order to better match the Seq
object behaviour)!
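        A minimal illustration of the contrast with the Seq behaviour noted
        above:
        >>> from Bio.Seq import Seq
        >>> from Bio.SeqRecord import SeqRecord
        >>> bool(SeqRecord(Seq("")))
        True
        >>> bool(Seq(""))
        False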
"""
return True
# Python 2:
__nonzero__ = __bool__
def __add__(self, other):
"""Add another sequence or string to this sequence.
The other sequence can be a SeqRecord object, a Seq object (or
similar, e.g. a MutableSeq) or a plain Python string. If you add
a plain string or a Seq (like) object, the new SeqRecord will simply
have this appended to the existing data. However, any per letter
annotation will be lost:
>>> from Bio import SeqIO
>>> record = SeqIO.read("Quality/solexa_faked.fastq", "fastq-solexa")
>>> print("%s %s" % (record.id, record.seq))
slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
>>> print(list(record.letter_annotations))
['solexa_quality']
>>> new = record + "ACT"
>>> print("%s %s" % (new.id, new.seq))
slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNNACT
>>> print(list(new.letter_annotations))
[]
The new record will attempt to combine the annotation, but for any
ambiguities (e.g. different names) it defaults to omitting that
annotation.
>>> from Bio import SeqIO
>>> with open("GenBank/pBAD30.gb") as handle:
... plasmid = SeqIO.read(handle, "gb")
>>> print("%s %i" % (plasmid.id, len(plasmid)))
pBAD30 4923
Now let's cut the plasmid into two pieces, and join them back up the
other way round (i.e. shift the starting point on this plasmid, have
a look at the annotated features in the original file to see why this
particular split point might make sense):
>>> left = plasmid[:3765]
>>> right = plasmid[3765:]
>>> new = right + left
>>> print("%s %i" % (new.id, len(new)))
pBAD30 4923
>>> str(new.seq) == str(right.seq + left.seq)
True
>>> len(new.features) == len(left.features) + len(right.features)
True
When we add the left and right SeqRecord objects, their annotation
is all consistent, so it is all conserved in the new SeqRecord:
>>> new.id == left.id == right.id == plasmid.id
True
>>> new.name == left.name == right.name == plasmid.name
True
>>> new.description == plasmid.description
True
>>> new.annotations == left.annotations == right.annotations
True
>>> new.letter_annotations == plasmid.letter_annotations
True
>>> new.dbxrefs == left.dbxrefs == right.dbxrefs
True
However, we should point out that when we sliced the SeqRecord,
any annotations dictionary or dbxrefs list entries were lost.
You can explicitly copy them like this:
>>> new.annotations = plasmid.annotations.copy()
>>> new.dbxrefs = plasmid.dbxrefs[:]
"""
if not isinstance(other, SeqRecord):
# Assume it is a string or a Seq.
# Note can't transfer any per-letter-annotations
return SeqRecord(self.seq + other,
id=self.id, name=self.name,
description=self.description,
features=self.features[:],
annotations=self.annotations.copy(),
dbxrefs=self.dbxrefs[:])
# Adding two SeqRecord objects... must merge annotation.
answer = SeqRecord(self.seq + other.seq,
features=self.features[:],
dbxrefs=self.dbxrefs[:])
# Will take all the features and all the db cross refs,
l = len(self)
for f in other.features:
answer.features.append(f._shift(l))
del l
for ref in other.dbxrefs:
if ref not in answer.dbxrefs:
answer.dbxrefs.append(ref)
# Take common id/name/description/annotation
if self.id == other.id:
answer.id = self.id
if self.name == other.name:
answer.name = self.name
if self.description == other.description:
answer.description = self.description
for k, v in self.annotations.items():
if k in other.annotations and other.annotations[k] == v:
answer.annotations[k] = v
# Can append matching per-letter-annotation
for k, v in self.letter_annotations.items():
if k in other.letter_annotations:
answer.letter_annotations[k] = v + other.letter_annotations[k]
return answer
def __radd__(self, other):
"""Add another sequence or string to this sequence (from the left).
This method handles adding a Seq object (or similar, e.g. MutableSeq)
or a plain Python string (on the left) to a SeqRecord (on the right).
See the __add__ method for more details, but for example:
>>> from Bio import SeqIO
>>> record = SeqIO.read("Quality/solexa_faked.fastq", "fastq-solexa")
>>> print("%s %s" % (record.id, record.seq))
slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
>>> print(list(record.letter_annotations))
['solexa_quality']
>>> new = "ACT" + record
>>> print("%s %s" % (new.id, new.seq))
slxa_0001_1_0001_01 ACTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
>>> print(list(new.letter_annotations))
[]
"""
if isinstance(other, SeqRecord):
raise RuntimeError("This should have happened via the __add__ of "
"the other SeqRecord being added!")
# Assume it is a string or a Seq.
# Note can't transfer any per-letter-annotations
offset = len(other)
return SeqRecord(other + self.seq,
id=self.id, name=self.name,
description=self.description,
features=[f._shift(offset) for f in self.features],
annotations=self.annotations.copy(),
dbxrefs=self.dbxrefs[:])
def upper(self):
"""Returns a copy of the record with an upper case sequence.
All the annotation is preserved unchanged. e.g.
>>> from Bio.Alphabet import generic_dna
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> record = SeqRecord(Seq("acgtACGT", generic_dna), id="Test",
... description = "Made up for this example")
>>> record.letter_annotations["phred_quality"] = [1, 2, 3, 4, 5, 6, 7, 8]
>>> print(record.upper().format("fastq"))
@Test Made up for this example
ACGTACGT
+
"#$%&'()
<BLANKLINE>
Naturally, there is a matching lower method:
>>> print(record.lower().format("fastq"))
@Test Made up for this example
acgtacgt
+
"#$%&'()
<BLANKLINE>
"""
return SeqRecord(self.seq.upper(),
id=self.id, name=self.name,
description=self.description,
dbxrefs=self.dbxrefs[:],
features=self.features[:],
annotations=self.annotations.copy(),
letter_annotations=self.letter_annotations.copy())
def lower(self):
"""Returns a copy of the record with a lower case sequence.
All the annotation is preserved unchanged. e.g.
>>> from Bio import SeqIO
>>> record = SeqIO.read("Fasta/aster.pro", "fasta")
>>> print(record.format("fasta"))
>gi|3298468|dbj|BAA31520.1| SAMIPF
GGHVNPAVTFGAFVGGNITLLRGIVYIIAQLLGSTVACLLLKFVTNDMAVGVFSLSAGVG
VTNALVFEIVMTFGLVYTVYATAIDPKKGSLGTIAPIAIGFIVGANI
<BLANKLINE>
>>> print(record.lower().format("fasta"))
>gi|3298468|dbj|BAA31520.1| SAMIPF
gghvnpavtfgafvggnitllrgivyiiaqllgstvaclllkfvtndmavgvfslsagvg
vtnalvfeivmtfglvytvyataidpkkgslgtiapiaigfivgani
<BLANKLINE>
To take a more annotation rich example,
>>> from Bio import SeqIO
>>> old = SeqIO.read("EMBL/TRBG361.embl", "embl")
>>> len(old.features)
3
>>> new = old.lower()
>>> len(old.features) == len(new.features)
True
>>> old.annotations["organism"] == new.annotations["organism"]
True
>>> old.dbxrefs == new.dbxrefs
True
"""
return SeqRecord(self.seq.lower(),
id=self.id, name=self.name,
description=self.description,
dbxrefs=self.dbxrefs[:],
features=self.features[:],
annotations=self.annotations.copy(),
letter_annotations=self.letter_annotations.copy())
def reverse_complement(self, id=False, name=False, description=False,
features=True, annotations=False,
letter_annotations=True, dbxrefs=False):
"""Returns new SeqRecord with reverse complement sequence.
You can specify the returned record's id, name and description as
strings, or True to keep that of the parent, or False for a default.
You can specify the returned record's features with a list of
SeqFeature objects, or True to keep that of the parent, or False to
omit them. The default is to keep the original features (with the
strand and locations adjusted).
You can also specify both the returned record's annotations and
letter_annotations as dictionaries, True to keep that of the parent,
or False to omit them. The default is to keep the original
annotations (with the letter annotations reversed).
        To show what happens to the per-letter annotations, consider an
example Solexa variant FASTQ file with a single entry, which we'll
read in as a SeqRecord:
>>> from Bio import SeqIO
>>> record = SeqIO.read("Quality/solexa_faked.fastq", "fastq-solexa")
>>> print("%s %s" % (record.id, record.seq))
slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
>>> print(list(record.letter_annotations))
['solexa_quality']
>>> print(record.letter_annotations["solexa_quality"])
[40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, -1, -2, -3, -4, -5]
Now take the reverse complement,
>>> rc_record = record.reverse_complement(id=record.id+"_rc")
>>> print("%s %s" % (rc_record.id, rc_record.seq))
slxa_0001_1_0001_01_rc NNNNNNACGTACGTACGTACGTACGTACGTACGTACGTACGTACGT
Notice that the per-letter-annotations have also been reversed,
although this may not be appropriate for all cases.
>>> print(rc_record.letter_annotations["solexa_quality"])
[-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40]
Now for the features, we need a different example. Parsing a GenBank
        file is probably the easiest way to get a nice example with features
in it...
>>> from Bio import SeqIO
>>> with open("GenBank/pBAD30.gb") as handle:
... plasmid = SeqIO.read(handle, "gb")
>>> print("%s %i" % (plasmid.id, len(plasmid)))
pBAD30 4923
>>> plasmid.seq
Seq('GCTAGCGGAGTGTATACTGGCTTACTATGTTGGCACTGATGAGGGTGTCAGTGA...ATG', IUPACAmbiguousDNA())
>>> len(plasmid.features)
13
Now, let's take the reverse complement of this whole plasmid:
>>> rc_plasmid = plasmid.reverse_complement(id=plasmid.id+"_rc")
>>> print("%s %i" % (rc_plasmid.id, len(rc_plasmid)))
pBAD30_rc 4923
>>> rc_plasmid.seq
Seq('CATGGGCAAATATTATACGCAAGGCGACAAGGTGCTGATGCCGCTGGCGATTCA...AGC', IUPACAmbiguousDNA())
>>> len(rc_plasmid.features)
13
Let's compare the first CDS feature - it has gone from being the
second feature (index 1) to the second last feature (index -2), its
strand has changed, and the location switched round.
>>> print(plasmid.features[1])
type: CDS
location: [1081:1960](-)
qualifiers:
Key: label, Value: ['araC']
Key: note, Value: ['araC regulator of the arabinose BAD promoter']
Key: vntifkey, Value: ['4']
<BLANKLINE>
>>> print(rc_plasmid.features[-2])
type: CDS
location: [2963:3842](+)
qualifiers:
Key: label, Value: ['araC']
Key: note, Value: ['araC regulator of the arabinose BAD promoter']
Key: vntifkey, Value: ['4']
<BLANKLINE>
You can check this new location, based on the length of the plasmid:
>>> len(plasmid) - 1081
3842
>>> len(plasmid) - 1960
2963
Note that if the SeqFeature annotation includes any strand specific
information (e.g. base changes for a SNP), this information is not
amended, and would need correction after the reverse complement.
Note trying to reverse complement a protein SeqRecord raises an
exception:
>>> from Bio.SeqRecord import SeqRecord
>>> from Bio.Seq import Seq
>>> from Bio.Alphabet import IUPAC
>>> protein_rec = SeqRecord(Seq("MAIVMGR", IUPAC.protein), id="Test")
>>> protein_rec.reverse_complement()
Traceback (most recent call last):
...
ValueError: Proteins do not have complements!
Also note you can reverse complement a SeqRecord using a MutableSeq:
>>> from Bio.SeqRecord import SeqRecord
>>> from Bio.Seq import MutableSeq
>>> from Bio.Alphabet import generic_dna
>>> rec = SeqRecord(MutableSeq("ACGT", generic_dna), id="Test")
>>> rec.seq[0] = "T"
>>> print("%s %s" % (rec.id, rec.seq))
Test TCGT
>>> rc = rec.reverse_complement(id=True)
>>> print("%s %s" % (rc.id, rc.seq))
Test ACGA
"""
from Bio.Seq import MutableSeq # Lazy to avoid circular imports
if isinstance(self.seq, MutableSeq):
# Currently the MutableSeq reverse complement is in situ
answer = SeqRecord(self.seq.toseq().reverse_complement())
else:
answer = SeqRecord(self.seq.reverse_complement())
if isinstance(id, basestring):
answer.id = id
elif id:
answer.id = self.id
if isinstance(name, basestring):
answer.name = name
elif name:
answer.name = self.name
if isinstance(description, basestring):
answer.description = description
elif description:
answer.description = self.description
if isinstance(dbxrefs, list):
answer.dbxrefs = dbxrefs
elif dbxrefs:
# Copy the old dbxrefs
answer.dbxrefs = self.dbxrefs[:]
if isinstance(features, list):
answer.features = features
elif features:
            # Copy the old features, adjusting location and strand
l = len(answer)
answer.features = [f._flip(l) for f in self.features]
# The old list should have been sorted by start location,
# reversing it will leave it sorted by what is now the end position,
# so we need to resort in case of overlapping features.
# NOTE - In the common case of gene before CDS (and similar) with
# the exact same locations, this will still maintain gene before CDS
answer.features.sort(key=lambda x: x.location.start.position)
if isinstance(annotations, dict):
answer.annotations = annotations
elif annotations:
# Copy the old annotations,
answer.annotations = self.annotations.copy()
if isinstance(letter_annotations, dict):
answer.letter_annotations = letter_annotations
elif letter_annotations:
# Copy the old per letter annotations, reversing them
for key, value in self.letter_annotations.items():
answer._per_letter_annotations[key] = value[::-1]
return answer
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/SeqRecord.py
|
Python
|
apache-2.0
| 46,584
|
[
"BioPerl",
"Biopython"
] |
7d82174f2b9ce22f693db036ab1cc20daff1527bcffabb2f0d70ecbc8e2f09c2
|
#
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: k8s
version_added: "2.5"
short_description: Query the K8s API
description:
- Uses the OpenShift Python client to fetch a specific object by name, all matching objects within a
namespace, or all matching objects for all namespaces.
    - Provides access to the full range of K8s APIs.
- Enables authentication via config file, certificates, password or token.
options:
api_version:
description:
      - Use to specify the API version. If I(resource_definition) is provided, the I(apiVersion) from the
I(resource_definition) will override this option.
default: v1
kind:
description:
      - Use to specify an object model. If I(resource_definition) is provided, the I(kind) from the
I(resource_definition) will override this option.
required: true
resource_name:
description:
      - Fetch a specific object by name. If I(resource_definition) is provided, the I(metadata.name) value
from the I(resource_definition) will override this option.
namespace:
description:
      - Limit the objects returned to a specific namespace. If I(resource_definition) is provided, the
I(metadata.namespace) value from the I(resource_definition) will override this option.
label_selector:
description:
- Additional labels to include in the query. Ignored when I(resource_name) is provided.
field_selector:
description:
- Specific fields on which to query. Ignored when I(resource_name) is provided.
resource_definition:
description:
- "Provide a YAML configuration for an object. NOTE: I(kind), I(api_version), I(resource_name),
and I(namespace) will be overwritten by corresponding values found in the provided I(resource_definition)."
src:
description:
- "Provide a path to a file containing a valid YAML definition of an object dated. Mutually
exclusive with I(resource_definition). NOTE: I(kind), I(api_version), I(resource_name), and I(namespace)
will be overwritten by corresponding values found in the configuration read in from the I(src) file."
- Reads from the local file system. To read from the Ansible controller's file system, use the file lookup
plugin or template lookup plugin, combined with the from_yaml filter, and pass the result to
I(resource_definition). See Examples below.
host:
description:
- Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
api_key:
description:
- Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment variable.
kubeconfig:
description:
- Path to an existing Kubernetes config file. If not provided, and no other connection
options are provided, the openshift client will attempt to load the default
        configuration file from I(~/.kube/config). Can also be specified via K8S_AUTH_KUBECONFIG environment
variable.
context:
description:
- The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment
variable.
username:
description:
- Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME environment
variable.
password:
description:
- Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD environment
variable.
cert_file:
description:
      - Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE
        environment variable.
key_file:
description:
      - Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE environment
variable.
ssl_ca_cert:
description:
- Path to a CA certificate used to authenticate with the API. Can also be specified via K8S_AUTH_SSL_CA_CERT
environment variable.
verify_ssl:
description:
- Whether or not to verify the API server's SSL certificates. Can also be specified via K8S_AUTH_VERIFY_SSL
environment variable.
type: bool
requirements:
- "python >= 2.7"
- "openshift >= 0.3"
- "PyYAML >= 3.11"
notes:
- "The OpenShift Python client wraps the K8s Python client, providing full access to
    all of the APIs and models available on both platforms. For API version details and
additional information visit https://github.com/openshift/openshift-restclient-python"
"""
EXAMPLES = """
- name: Fetch a list of namespaces
set_fact:
projects: "{{ lookup('k8s', api_version='v1', kind='Namespace') }}"
- name: Fetch all deployments
set_fact:
deployments: "{{ lookup('k8s', kind='Deployment', namespace='testing') }}"
- name: Fetch all deployments in a namespace
set_fact:
deployments: "{{ lookup('k8s', kind='Deployment', namespace='testing') }}"
- name: Fetch a specific deployment by name
set_fact:
deployments: "{{ lookup('k8s', kind='Deployment', namespace='testing', resource_name='elastic') }}"
- name: Fetch with label selector
set_fact:
service: "{{ lookup('k8s', kind='Service', label_selector='app=galaxy') }}"
# Use parameters from a YAML config
- name: Load config from the Ansible controller filesystem
set_fact:
config: "{{ lookup('file', 'service.yml') | from_yaml }}"
- name: Using the config (loaded from a file in prior task), fetch the latest version of the object
set_fact:
service: "{{ lookup('k8s', resource_definition=config) }}"
- name: Use a config from the local filesystem
set_fact:
service: "{{ lookup('k8s', src='service.yml') }}"
"""
RETURN = """
_list:
description:
    - One or more object definitions returned from the API.
type: complex
contains:
api_version:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: Represents the REST resource this object represents.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: complex
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: complex
status:
description: Current status details for the object.
returned: success
type: complex
"""
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.k8s.lookup import KubernetesLookup
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
return KubernetesLookup().run(terms, variables=variables, **kwargs)
|
ravibhure/ansible
|
lib/ansible/plugins/lookup/k8s.py
|
Python
|
gpl-3.0
| 7,896
|
[
"Galaxy",
"VisIt"
] |
b0ce83830682ff83c37069d1ac6e12ede9319520d413977b1d925162d2cffe7a
|
# pylint: disable=invalid-name,no-init
from __future__ import (absolute_import, division, print_function)
from mantid.simpleapi import *
from mantid.api import FrameworkManager
import copy
import os
import re
import stresstesting
BANNED_FILES = ['80_tubes_Top_and_Bottom_April_2015.xml',
'80_tubes_Top_and_Bottom_May_2016.xml',
'80tubeCalibration_18-04-2016_r9330-9335.nxs',
'80tube_DIRECT_3146_M1_30April15_r3146.dat',
'992 Descriptions.txt',
'directBeamDatabaseFall2014_IPTS_11601_2.cfg',
'BASIS_AutoReduction_Mask.xml',
'BioSANS_dark_current.xml',
'BioSANS_empty_cell.xml',
'BioSANS_empty_trans.xml',
'BioSANS_exp61_scan0004_0001.xml',
'BioSANS_flood_data.xml',
'BioSANS_sample_trans.xml',
'C6H5Cl-Gaussian.log',
'CNCS_TS_2008_08_18.dat',
'DISF_NaF.cdl',
'det_corrected7.dat',
'det_LET_cycle12-3.dat',
'DIRECT_M1_21Nov15_6x8mm_0.9_20.0_r6279_extrapolated.dat',
'eqsans_configuration.1463',
'FLAT_CELL.061',
'HYSA_mask.xml',
'IN10_P3OT_350K.inx',
'IN13_16347.asc',
'IN16_65722.asc',
'IP0005.dat',
'batch_input.csv',
'mar11015.msk',
                'LET_hard.msk',  # It seems the loader does not understand it
'MASK.094AA',
'MASKSANS2D_094i_RKH.txt',
'MASKSANS2D.091A',
'MASKSANS2Doptions.091A',
'MASK_squareBeamstop_20-June-2015.xml',
'MaskSANS2DReductionGUI.txt',
'MaskSANS2DReductionGUI_MaskFiles.txt',
'MaskSANS2DReductionGUI_LimitEventsTime.txt',
'MASK_SANS2D_FRONT_Edges_16Mar2015.xml',
'MASK_SANS2D_REAR_Bottom_3_tubes_16May2014.xml',
'MASK_SANS2D_REAR_Edges_16Mar2015.xml',
'MASK_SANS2D_REAR_module2_tube12.xml',
'MASK_SANS2D_beam_stop_4m_x_100mm_2July2015_medium_beamstop.xml',
'MASK_SANS2D_BOTH_Extras_24Mar2015.xml',
'MASK_Tube6.xml',
'MASK_squareBeamstop_6x8Beam_11-October-2016.xml',
'MAP17269.raw', # Don't need to check multiple MAPS files
'MAP17589.raw',
'MER06399.raw', # Don't need to check multiple MERLIN files
'PG3_11485-1.dat', # Generic load doesn't do very well with ASCII files
'PG3_2538_event.nxs', # Don't need to check all of the PG3 files
'PG3_9829_event.nxs',
'REF_M_9684_event.nxs',
'REF_M_9709_event.nxs',
'REF_M_24945_event.nxs',
'REF_M_24949_event.nxs',
'SANS2D_periodTests.csv',
'SANS2D_992_91A.csv',
'SANS2D_mask_batch.csv',
'sans2d_reduction_gui_batch.csv',
'squaricn.phonon',
'test_isotopes.phonon',
'squaricn.castep',
'target_circles_mask.xml',
'tube10_mask.xml',
'linked_circles_mask.xml',
'testCansas1DMultiEntry.xml',
'Wish_Diffuse_Scattering_ISAW_UB.mat',
'WSH_test.dat',
'WISH00035991.raw',
'WISH00038237.raw',
'SANS2D_multiPeriodTests.csv',
'SANS2D_periodTests.csv',
'SANS2DTube_ZerroErrorFreeTest.txt',
'SANS2DTUBES_ZeroErrorFree_batch.csv',
'DIRECTM1_15785_12m_31Oct12_v12.dat',
'MaskSANS2DReductionGUI.txt',
'sans2d_reduction_gui_batch.csv',
'MANTID_FLAT_CELL.115',
'MaskLOQData.txt',
'DIRECTHAB.983',
'loq_batch_mode_reduction.csv',
'det_corrected7.nxs', # this file can be loaded by LoadDetectorInfo; not sure if generic loader should ever deal with it
'poldi2013n006903.hdf',
'poldi2013n006904.hdf',
'poldi2014n019874.hdf',
'poldi2014n019881.hdf',
'poldi2015n000977.hdf',
'USER_SANS2D_143ZC_2p4_4m_M4_Knowles_12mm.txt',
'USER_LARMOR_151B_LarmorTeam_80tubes_BenchRot1p4_M4_r3699.txt',
'USER_SANS2D_154E_2p4_4m_M3_Xpress_8mm_SampleChanger.txt',
'USER_Larmor_163F_HePATest_r13038.txt',
'Vesuvio_IP_file_test.par',
'IP0004_10.par',
'Crystalb3lypScratchAbins.out',
'V15_0000016544_S000_P01.raw',
'TolueneTAbins.out',
'TolueneSmallerOrderAbins.out',
'TolueneLargerOrderAbins.out',
'TolueneScale.out',
'TolueneScratchAbins.out',
'SingleCrystalDiffuseReduction_UB.mat',
'Na2SiF6_DMOL3.outmol',
'FE_ALPHA.cif',
'Fe-gamma.cif',
'Fe-alpha.cif',
'Sm2O3.cif',
'template_ENGINX_241391_236516_North_bank.prm'
]
EXPECTED_EXT = '.expected'
BANNED_REGEXP = [r'SANS2D\d+.log$',
r'SANS2D00000808_.+.txt$',
r'.*_reduction.log$',
r'.+_characterization_\d+_\d+_\d+.*\.txt',
r'.*\.cal',
r'.*\.detcal',
r'.*Grouping\.xml',
r'.*\.map',
r'.*\.irf',
r'.*\.hkl',
r'EVS.*\.raw',
r'.*_pulseid\.dat',
r'.*\.phonon']
# This list stores files that will be loaded first.
# Implemented as a simple solution to avoid failures on
# WinXP where small files have trouble allocating larger
# amounts of contiguous memory.
# Usage of XP is getting lower, so we don't want to compromise the
# performance of the code elsewhere just to pass here.
PRIORITY_FILES = ['HYS_13658_event.nxs',
'ILLIN5_Sample_096003.nxs',
'ILLIN5_Vana_095893.nxs']
def useDir(direc):
"""Only allow directories that aren't test output or
reference results."""
if "reference" in direc:
return False
if config["defaultsave.directory"] == direc:
return False
return "Data" in direc
def useFile(direc, filename):
"""Returns (useFile, abspath)"""
# if it is an -stamp file then assume these are cmake created files
if filename.endswith("-stamp"):
return False, filename
# list of explicitly banned files at the top of this script
if filename in BANNED_FILES:
return False, filename
# is an 'expected' file
if filename.endswith(EXPECTED_EXT):
return False, filename
# list of banned files by regexp
for regexp in BANNED_REGEXP:
if re.match(regexp, filename, re.I) is not None:
return False, filename
filename = os.path.join(direc, filename)
if os.path.isdir(filename):
return False, filename
return True, filename
class LoadLotsOfFiles(stresstesting.MantidStressTest):
def __getDataFileList__(self):
# get a list of directories to look in
dirs = config['datasearch.directories'].split(';')
dirs = [item for item in dirs if useDir(item)]
print("Looking for data files in:", ', '.join(dirs))
        # Files and their corresponding sizes; the low-memory Windows machines
        # fare better loading the big files first
files = {}
priority_abspaths = copy.deepcopy(PRIORITY_FILES)
for direc in dirs:
myFiles = os.listdir(direc)
for filename in myFiles:
(good, fullpath) = useFile(direc, filename)
# print "***", good, filename
if good:
files[fullpath] = os.path.getsize(fullpath)
try:
cur_index = PRIORITY_FILES.index(filename)
priority_abspaths[cur_index] = fullpath
except ValueError:
pass
datafiles = sorted(files, key=lambda key: files[key], reverse=True)
# Put the priority ones first
for insertion_index, fname in enumerate(priority_abspaths):
try:
cur_index = datafiles.index(fname)
except ValueError:
continue
datafiles.pop(cur_index)
datafiles.insert(insertion_index, fname)
return datafiles
def __runExtraTests__(self, wksp, filename):
"""Runs extra tests that are specified in '.expected' files
next to the data files"""
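        # An illustrative (hypothetical) '.expected' file: each line holds a
        # Python expression that is eval'd below with 'wksp' in scope, e.g.
        #   wksp.getNumberHistograms() == 4
        #   wksp.getNumberEvents() > 0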
expected = filename + EXPECTED_EXT
        if not os.path.exists(expected):  # no .expected file, nothing extra to run
            return True
        if os.path.getsize(expected) <= 0:  # empty .expected file, nothing to check
            return True
# Eval statement will use current scope. Allow access to
# mantid module
import mantid # noqa
print("Found an expected file '%s' file" % expected)
expectedfile = open(expected)
tests = expectedfile.readlines()
failed = [] # still run all of the tests
for test in tests:
test = test.strip()
result = eval(test)
if not result:
failed.append((test, result))
if len(failed) > 0:
for item in failed:
print(" Failed test '%s' returned '%s' instead of 'True'" % (item[0], item[1]))
return False
return True
def __loadAndTest__(self, filename):
"""Do all of the real work of loading and testing the file"""
print("----------------------------------------")
print("Loading '%s'" % filename)
from mantid.api import Workspace
# Output can be a tuple if the Load algorithm has extra output properties
# but the output workspace should always be the first argument
outputs = Load(filename)
if isinstance(outputs, tuple):
wksp = outputs[0]
else:
wksp = outputs
if not isinstance(wksp, Workspace):
print("Unexpected output type from Load algorithm: Type found=%s" % str(type(outputs)))
return False
if wksp is None:
print('Load returned None')
return False
# generic checks
if wksp.name() is None or len(wksp.name()) <= 0:
print("Workspace does not have a name")
del wksp
return False
wid = wksp.id()
if wid is None or len(wid) <= 0:
print("Workspace does not have an id")
del wksp
return False
# checks based on workspace type
if hasattr(wksp, "getNumberHistograms"):
if wksp.getNumberHistograms() <= 0:
print("Workspace has zero histograms")
del wksp
return False
if "managed" not in wid.lower() and wksp.getMemorySize() <= 0:
print("Workspace takes no memory: Memory used=" + str(wksp.getMemorySize()))
del wksp
return False
# checks for EventWorkspace
if hasattr(wksp, "getNumberEvents"):
if wksp.getNumberEvents() <= 0:
print("EventWorkspace does not have events")
del wksp
return False
# do the extra checks
result = self.__runExtraTests__(wksp, filename)
# cleanup
del wksp
return result
def runTest(self):
"""Main entry point for the test suite"""
files = self.__getDataFileList__()
# run the tests
failed = []
for filename in files:
try:
if not self.__loadAndTest__(filename):
print("FAILED TO LOAD '%s'" % filename)
failed.append(filename)
except Exception as e:
print("FAILED TO LOAD '%s' WITH ERROR:" % filename)
print(e)
failed.append(filename)
finally:
# Clear everything for the next test
FrameworkManager.Instance().clear()
# final say on whether or not it 'worked'
print("----------------------------------------")
if len(failed) != 0:
print("SUMMARY OF FAILED FILES")
for filename in failed:
print(filename)
raise RuntimeError("Failed to load %d of %d files"
% (len(failed), len(files)))
else:
print("Successfully loaded %d files" % len(files))
def excludeInPullRequests(self):
return True
|
ScreamingUdder/mantid
|
Testing/SystemTests/tests/analysis/LoadLotsOfFiles.py
|
Python
|
gpl-3.0
| 12,957
|
[
"CASTEP",
"Gaussian"
] |
303efbafa5d691d270f963223db514b547717cd2acc85791d8281e24efbbc398
|
"""2015 SciPy John Hunter Excellence in Plotting Contest
Author: Robert Nikutta <robert.nikutta@gmail.com>
Title: Clustering of astronomical objects in WISE 3D color space
Based on: Nikutta, Hunt-Walker, Ivezic, Nenkova, Elitzur,
'The meaning of WISE colours - I. The Galaxy and its satellites',
MNRAS 442, 3361-3379 (2014)
http://dx.doi.org/10.1093/mnras/stu1087
http://adsabs.harvard.edu/abs/2014MNRAS.442.3361N
This stereoscopic plot (cross your eyes!) shows the distribution of
different types of astronomical objects in the 3D color space of the
WISE spacecraft (Wide-field Infrared Survey Explorer). Several classes
of objects are identified with differently colored dots. In
traditional 2D color-color plots clusters can overlap, making it
difficult to identify them. A 3D color-color plot, and especially a
stereoscopic view of it, provides a much more intuitive and immersive
experience.
Carbon-rich Asymptotic Giant Branch stars (AGB) are shown in
blue. Most of them are found in the Large Magellanic
Cloud. Oxygen-rich AGB stars are shown in red. Young Stellar Objects
(YSO) which are surrounded by dusty shells with constant radial
density profiles and small optical depths are shown in green. Both
cool (~600 Kelvin) and warm (~1200 Kelvin) shells fall in this
region. Warmer YSO shells of constant density fall in the cluster
of orange color, but their optical depths are also higher. Finally,
small black dots show other astronomical objects in our Galaxy and its
satellites which have not been associated with the other
clusters. They are typically a mix of everything.
Example:
-------
import plot
F = plot.Figure(nxpix=1920) # full HD
F.make_stereoscopic_3d_scatter() # generates PNG file with default settings
"""
__author__ = 'Robert Nikutta <robert.nikutta@gmail.com>'
__version__ = '20150412'
import numpy as N
import pylab as p
import matplotlib
from matplotlib.gridspec import GridSpec
from mpl_toolkits.mplot3d import Axes3D
class Figure:
def __init__(self,nxpix=1280):
"""Generate a 3D stereoscopic view of ~15k WISE sources. Color
clusters of objects differently.
Parameters:
-----------
nxpix : int
Number of pixels of the output (PNG) file. An aspect ratio
of 16:9 is assumed.
"""
self.dpi = 100
self.aspect = 16./9.
self.ux = nxpix/float(self.dpi)
self.uy = self.ux/self.aspect
# Load data (WISE colors)
print "Loading data..."
with N.load('data.npz') as datafile:
self.x, self.y, self.z = datafile['x'], datafile['y'], datafile['z']
print "Number of objects: %d" % self.x.size
print "Done."
def make_stereoscopic_3d_scatter(self,azimuth=-18,saveformat='png'):
"""Generate two panels, 5 degrees apart in azimuth. Cross eyes for
stereoscopic view.
Parameters:
-----------
azimuth : {float,int}
The azimuth angle (in degrees) at which the camera views
the scene.
saveformat : str
Generate an output file, with the supplied azimuth in the
file name. Must be either 'png' (recommended, default) or
'pdf' (will be rather slow to save).
Returns:
--------
Nothing, but saves an output file.
"""
assert (saveformat in ('png','pdf')), "saveformat must be 'png' (recommended) or 'pdf' (will be very slow to save)."
filename = '3D_color_stereoscopic_az%07.2f.%s' % (azimuth,saveformat)
print "Generating plot %s" % filename
self.setup_figure(figsize=(self.ux,self.uy)) # width, height
# left panel (=right eye)
ax1 = p.subplot(self.gs3D[0],projection='3d',aspect='equal',axisbg='w')
plot_scatter_3D(self.fig,ax1,1,self.x,self.y,self.z,self.uy,azimuth=azimuth)
# right panel (=left eye)
ax2 = p.subplot(self.gs3D[1],projection='3d',aspect='equal',axisbg='w')
plot_scatter_3D(self.fig,ax2,2,self.x,self.y,self.z,self.uy,azimuth=azimuth-5)
if saveformat == 'png':
p.savefig(filename,dpi=100)
else:
p.savefig(filename)
p.close()
def make_movie_frames(self,azstart=1,azstop=10,azstep=1):
"""Helper function to generate frames (for a video) with varying
azimuth angle.
Parameters:
-----------
azstart, azstop, azstep : float-ables
The azimuth angles of first frame, last frame
(approximate), and of the step size. All in degrees. All
can be negative (determines direction of scene rotation)
"""
try:
azstart = float(azstart)
azstop = float(azstop)
azstep = float(azstep)
except ValueError:
raise Exception, "azstart, azstop, azstep must be convertible to a floating point number."
if azstop < azstart:
azstep = -N.abs(azstep)
allaz = N.arange(azstart,azstop,azstep)
for j,az in enumerate(allaz):
print "Generating frame file %d of %d" % (j+1,len(allaz))
self.make_stereoscopic_3d_scatter(azimuth=az)
def setup_figure(self,figsize):
"""Set up the figure and rc params."""
self.fontsize = 2*self.uy
p.rcParams['axes.labelsize'] = self.fontsize
p.rcParams['font.size'] = self.fontsize
p.rcParams['legend.fontsize'] = self.fontsize-2
p.rcParams['xtick.labelsize'] = self.fontsize
p.rcParams['ytick.labelsize'] = self.fontsize
        self.fig = p.figure(figsize=figsize)  # width, height in inches, rendered at self.dpi
self.fig.suptitle('Clustering of astronomical objects in WISE 3D color space\n(cross your eyes for stereoscopic view)',color='k',fontsize=self.fontsize+2)
# this will hold the 3D scatter plot
self.gs3D = GridSpec(1,2)
self.gs3D.update(left=0.02,right=0.98,bottom=0.,top=1.,wspace=0.05,hspace=0.)
def plot_scatter_3D(fig,ax,sid,x,y,z,unit,azimuth=-25):
# some constants
lo, hi = -0.5, 4 # plotting limits
s = unit/2.5 # standard marker size for scatter plot
# conditions to select groups of objects
    coO = (x > 0.2) & (x < 2) & (y > 0.4) & (y < 2.2) & (z > 0) & (z < 1.3) & (z > 0.722*y - 0.289) # oxygen-rich AGB stars
    coC = (x > 0.629*y - 0.198) & (x < 0.629*y + 0.359) & (z > 0.722*y - 0.911) & (z < 0.722*y - 0.289) # carbon-rich AGB stars
coCDSYSOcool = (x < 0.2) & (y < 0.4) # both cool & warm YSO shells w/ constant density profile & low optical depth
coCDSYSOwarm = (x > 0.3) & (x < 1.4) & (y > 1.4) & (y < 3.5) & (z > 1.5) & (z < 2.8) # warm YSO shells w/ constant density profile and high optical depth
coOTHER = ~(coO | coC | coCDSYSOcool | coCDSYSOwarm) # other/unidentified (a mix of everything)
groups = [coO,coC,coCDSYSOcool,coCDSYSOwarm,coOTHER]
# plot side panes
marker = 'o'
colors = ('r','#1A7EFF','g','#FFC81A','0.2') # red, blue, green, orange, very dark gray
alphas = [0.3]*len(groups)
    sizes = [s,s,s,s,s/3.] # make 'other' appear a bit less prominent
for j,group in enumerate(groups):
cset = ax.scatter(x[group], y[group], lo, zdir='z', s=sizes[j], marker=marker, facecolors=colors[j], edgecolors=colors[j], linewidths=0., alpha=alphas[j])
cset = ax.scatter(y[group], z[group], hi, zdir='x', s=sizes[j], marker=marker, facecolors=colors[j], edgecolors=colors[j], linewidths=0., alpha=alphas[j])
cset = ax.scatter(x[group], z[group], hi, zdir='y', s=sizes[j], marker=marker, facecolors=colors[j], edgecolors=colors[j], linewidths=0., alpha=alphas[j])
# plot 3D clusters
# labels = ['O-rich AGB','C-rich AGB',r'cool YSO shells, $\rho(r)$=const.',r'warm YSO shells, $\rho(r)$=const., high optical depth','other']
    alphas = [0.8,0.8,0.8,0.8,0.4] # make 'other' appear a bit less prominent
for j,group in enumerate(groups):
ax.scatter(x[group], y[group], z[group], s=sizes[j], marker=marker, facecolors=colors[j], edgecolors='w', linewidths=0.1, alpha=alphas[j])
# generate view
ax.view_init(elev=18, azim=azimuth)
# per-axis settings
for prop in ('w_xaxis','w_yaxis','w_zaxis'):
obj = getattr(ax,prop)
obj.set_pane_color((1,1,1,1.0))
obj.gridlines.set_lw(0.3)
obj._axinfo.update({'grid' : {'color': (0.5,0.5,0.5,1)}})
# final touch ups
ax.set_xlim(hi,lo)
ax.set_ylim(lo,hi)
ax.set_zlim(lo,hi)
ax.set_xticks((0,1,2,3,4))
ax.set_yticks((0,1,2,3,4))
ax.set_zticks((0,1,2,3,4))
ax.set_xlabel('W1 - W2 (mag)')
ax.set_ylabel('W2 - W3 (mag)')
ax.set_zlabel('W3 - W4 (mag)')
|
rnikutta/wise3Dstereoscopic
|
plot.py
|
Python
|
bsd-3-clause
| 8,856
|
[
"Galaxy"
] |
fbd257de313317a93bac16c380e5215a8a3458490e7c048a7048e7703fdbdfa9
|
"""semi-views for the `group_messaging` application
These are not really views - rather context generator
functions, to be used separately, when needed.
For example, some other application can call these
in order to render messages within the page.
Notice that :mod:`urls` module decorates all these functions
and turns them into complete views
"""
import copy
import datetime
from django.template.loader import get_template
from django.template import Context
from django.contrib.auth.models import User
from django.db import models
from django.forms import IntegerField
from django.http import HttpResponse
from django.http import HttpResponseNotAllowed
from django.http import HttpResponseForbidden
from django.utils import simplejson
from askbot.utils.views import PjaxView
from .models import Message
from .models import MessageMemo
from .models import SenderList
from .models import LastVisitTime
from .models import get_personal_group_by_user_id
from .models import get_personal_groups_for_users
class NewThread(PjaxView):
"""view for creation of new thread"""
http_method_list = ('POST',)
def post(self, request):
"""creates a new thread on behalf of the user
response is blank, because on the client side we just
need to go back to the thread listing view whose
        content should be cached in the client.
"""
usernames = request.POST['to_usernames']
usernames = map(lambda v: v.strip(), usernames.split(','))
users = User.objects.filter(username__in=usernames)
missing = copy.copy(usernames)
for user in users:
if user.username in missing:
missing.remove(user.username)
result = dict()
if missing:
result['success'] = False
result['missing_users'] = missing
if request.user.username in usernames:
result['success'] = False
result['self_message'] = True
if result.get('success', True):
recipients = get_personal_groups_for_users(users)
message = Message.objects.create_thread(
sender=request.user,
recipients=recipients,
text=request.POST['text']
)
result['success'] = True
result['message_id'] = message.id
return HttpResponse(simplejson.dumps(result), content_type='application/json')
class PostReply(PjaxView):
"""view to create a new response"""
http_method_list = ('POST',)
def post(self, request):
parent_id = IntegerField().clean(request.POST['parent_id'])
parent = Message.objects.get(id=parent_id)
message = Message.objects.create_response(
sender=request.user,
text=request.POST['text'],
parent=parent
)
last_visit = LastVisitTime.objects.get(
message=message.root,
user=request.user
)
last_visit.at = datetime.datetime.now()
last_visit.save()
return self.render_to_response(
Context({'post': message, 'user': request.user}),
template_name='group_messaging/stored_message.html'
)
class ThreadsList(PjaxView):
"""shows list of threads for a given user"""
template_name = 'group_messaging/threads_list.html'
http_method_list = ('GET',)
def get_context(self, request, *args):
"""returns thread list data"""
if len(args):
user = args[0]
else:
user = request.user
#get threads and the last visit time
sender_id = IntegerField().clean(request.REQUEST.get('sender_id', '-1'))
if sender_id == -2:
threads = Message.objects.get_threads(
recipient=user,
deleted=True
)
elif sender_id == -1:
threads = Message.objects.get_threads(recipient=user)
elif sender_id == user.id:
threads = Message.objects.get_sent_threads(sender=user)
else:
sender = User.objects.get(id=sender_id)
threads = Message.objects.get_threads(
recipient=user,
sender=sender
)
threads = threads.order_by('-last_active_at')
#for each thread we need to know if there is something
#unread for the user - to mark "new" threads as bold
threads_data = dict()
for thread in threads:
thread_data = dict()
#determine status
thread_data['status'] = 'new'
#determine the senders info
senders_names = thread.senders_info.split(',')
if user.username in senders_names:
senders_names.remove(user.username)
thread_data['senders_info'] = ', '.join(senders_names)
thread_data['thread'] = thread
threads_data[thread.id] = thread_data
ids = [thread.id for thread in threads]
counts = Message.objects.filter(
id__in=ids
).annotate(
responses_count=models.Count('descendants')
).values('id', 'responses_count')
for count in counts:
thread_id = count['id']
responses_count = count['responses_count']
threads_data[thread_id]['responses_count'] = responses_count
last_visit_times = LastVisitTime.objects.filter(
user=user,
message__in=threads
)
for last_visit in last_visit_times:
thread_data = threads_data[last_visit.message_id]
if thread_data['thread'].last_active_at <= last_visit.at:
thread_data['status'] = 'seen'
return {
'threads': threads,
'threads_count': threads.count(),
'threads_data': threads_data,
'sender_id': sender_id
}
class DeleteOrRestoreThread(ThreadsList):
"""subclassing :class:`ThreadsList`, because deletion
or restoring of thread needs subsequent refreshing
of the threads list"""
http_method_list = ('POST',)
def post(self, request, thread_id=None):
"""process the post request:
* delete or restore thread
* recalculate the threads list and return it for display
by reusing the threads list "get" function
"""
#part of the threads list context
sender_id = IntegerField().clean(request.POST['sender_id'])
#a little cryptic, but works: sender_id == -2 means we are viewing deleted threads, so the request is to restore
if sender_id == -2:
action = 'restore'
else:
action = 'delete'
thread = Message.objects.get(id=thread_id)
memo, created = MessageMemo.objects.get_or_create(
user=request.user,
message=thread
)
if action == 'delete':
memo.status = MessageMemo.ARCHIVED
else:
memo.status = MessageMemo.SEEN
memo.save()
context = self.get_context(request)
return self.render_to_response(Context(context))
class SendersList(PjaxView):
"""shows list of senders for a user"""
template_name = 'group_messaging/senders_list.html'
http_method_names = ('GET',)
def get_context(self, request):
"""get data about senders for the user"""
senders = SenderList.objects.get_senders_for_user(request.user)
senders = senders.values('id', 'username')
return {'senders': senders, 'request_user_id': request.user.id}
class ThreadDetails(PjaxView):
"""shows entire thread in the unfolded form"""
template_name = 'group_messaging/thread_details.html'
http_method_names = ('GET',)
def get_context(self, request, thread_id=None):
"""shows individual thread"""
#todo: assert that current thread is the root
root = Message.objects.get(id=thread_id)
responses = Message.objects.filter(root__id=thread_id).order_by('sent_at')
last_visit, created = LastVisitTime.objects.get_or_create(
message=root,
user=request.user
)
root.mark_as_seen(request.user)
if created is False:
last_visit.at = datetime.datetime.now()
last_visit.save()
return {
'root_message': root,
'responses': responses,
'request': request
}
|
jesonyang001/qarepo
|
askbot/deps/group_messaging/views.py
|
Python
|
gpl-3.0
| 9,181
|
[
"VisIt"
] |
8df5e9d4866387eadf041c45b4dd57576c0b97d18831a551389e30c92b28d0e5
|
import ast
import _ast
from jaspyx.ast_util import ast_call, ast_load, ast_store
from jaspyx.context.block import BlockContext
from jaspyx.visitor import BaseVisitor
class TryExcept(BaseVisitor):
def visit_TryExcept(self, node):
if node.orelse:
raise NotImplementedError('Try-except else handler not implemented')
self.indent()
self.output('try')
self.block(node.body, context=BlockContext(self.stack[-1]))
self.output(' catch($e) ')
self.push(BlockContext(self.stack[-1]))
if_start = None
if_end = None
for handler in node.handlers:
if handler.type is not None:
if handler.name is not None:
body = handler.body[:]
body.insert(0, ast.Assign(
[handler.name],
ast_call(ast_load('JS'), ast.Str('$e'))
))
else:
body = handler.body
types = handler.type.elts if isinstance(handler.type, _ast.Tuple) else [handler.type]  # a tuple of exception types unpacks to its elements
conditions = [
ast_call(
ast_load('isinstance'),
ast_call(ast_load('JS'), ast.Str('$e')),
type_,
)
for type_ in types
]
_if = ast.If(
ast.BoolOp(ast.Or(), conditions),
body,
[]
)
if if_start is None:
if_start = if_end = _if
else:
if_end.orelse, if_end = [_if], _if
else:
if handler is not node.handlers[-1]:
raise SyntaxError("default 'except:' must be last")
if if_start is None:
self.block(handler.body)
else:
if_end.orelse = handler.body
if if_start is not None:
self.visit(if_start)
self.pop()
self.output('\n')
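# For orientation, a sketch of the JavaScript shape this visitor emits
# (inferred from the logic above, not captured output) for
# ``try: ... except ValueError as e: ... except: ...``:
#
#     try { ... } catch($e) {
#         if (isinstance(JS('$e'), ValueError)) { e = JS('$e'); ... }
#         else { ... }
#     }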
|
iksteen/jaspyx
|
jaspyx/visitor/tryexcept.py
|
Python
|
mit
| 2,092
|
[
"VisIt"
] |
774977a67d7bfa55b3240219c9d8a35a0c8489ec768291b2a2471e1b199b7d88
|
import email.parser
import email.policy
import email.utils
import typing
from email.message import EmailMessage
from itertools import chain
from textwrap import dedent
import pytest
from riggerlib import recursive_update
from widgetastic_patternfly import CheckableBootstrapTreeview as Check_tree
from cfme import test_requirements
from cfme.cloud.provider import CloudProvider
from cfme.cloud.provider.azure import AzureProvider
from cfme.cloud.provider.gce import GCEProvider
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.scvmm import SCVMMProvider
from cfme.markers.env_markers.provider import providers
from cfme.utils import normalize_text
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.blockers import BZ
from cfme.utils.blockers import GH
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.providers import ProviderFilter
from cfme.utils.update import update
from cfme.utils.wait import wait_for
Checker = typing.NewType('Checker', typing.Callable[[object], bool])
pytestmark = [
pytest.mark.meta(server_roles="+automate +notifier"),
test_requirements.provision,
pytest.mark.tier(2),
pytest.mark.provider(gen_func=providers,
filters=[ProviderFilter(classes=[CloudProvider, InfraProvider],
required_flags=['provision'])],
scope="function"),
pytest.mark.usefixtures('setup_provider')
]
@pytest.fixture()
def vm_name():
return random_vm_name(context='prov', max_length=12)
@pytest.fixture()
def instance_args(request, provider, provisioning, vm_name):
""" Fixture to prepare instance parameters for provisioning
"""
inst_args = dict(template_name=provisioning.get('image', {}).get('name') or provisioning.get(
'template'))
if not inst_args.get('template_name'):
pytest.skip(reason='template name not specified in the provisioning in config')
# Base instance info
inst_args['request'] = {
'notes': 'Testing provisioning from image {} to vm {} on provider {}'
.format(inst_args.get('template_name'), vm_name, provider.key),
}
# Check whether auto-selection of environment is passed
auto = False # By default provisioning will be manual
try:
parameter = request.param
auto = parameter
except AttributeError:
# in case nothing was passed just skip
pass
if auto:
inst_args.update({'environment': {'automatic_placement': auto}})
yield vm_name, inst_args
@pytest.fixture
def provisioned_instance(provider, instance_args, appliance):
""" Checks provisioning status for instance """
vm_name, inst_args = instance_args
collection = appliance.provider_based_collection(provider)
instance = collection.create(vm_name, provider, form_values=inst_args)
if not instance:
raise Exception("instance returned by collection.create is 'None'")
yield instance
logger.info('Instance cleanup, deleting %s', instance.name)
try:
instance.cleanup_on_provider()
except Exception as ex:
logger.warning('Exception while deleting instance fixture, continuing: %s', ex)
@pytest.mark.meta(automates=[1830305])
@pytest.mark.parametrize('instance_args', [True, False], ids=["Auto", "Manual"], indirect=True)
def test_provision_from_template(provisioned_instance):
""" Tests instance provision from template via CFME UI
Metadata:
test_flag: provision
Bugzilla:
1830305
Polarion:
assignee: jhenner
caseimportance: critical
casecomponent: Provisioning
initialEstimate: 1/4h
"""
assert provisioned_instance.exists_on_provider, "Instance wasn't provisioned successfully"
@pytest.mark.provider([GCEProvider], required_fields=[['provisioning', 'image']])
@pytest.mark.usefixtures('setup_provider')
def test_gce_preemptible_provision(appliance, provider, instance_args, soft_assert):
"""
Polarion:
assignee: jhenner
caseimportance: high
casecomponent: Provisioning
initialEstimate: 1/6h
"""
vm_name, inst_args = instance_args
inst_args['properties']['is_preemptible'] = True
instance = appliance.collections.cloud_instances.create(vm_name,
provider,
form_values=inst_args)
view = navigate_to(instance, "Details")
preemptible = view.entities.summary("Properties").get_text_of("Preemptible")
soft_assert('Yes' in preemptible, "GCE Instance isn't Preemptible")
soft_assert(instance.exists_on_provider, "Instance wasn't provisioned successfully")
def _post_approval(smtp_test, provision_request, vm_type, requester, provider, approved_vm_names):
# requester includes the trailing space
approved_subject = normalize_text(f"your {vm_type} request was approved")
approved_from = normalize_text(f"{vm_type} request from {requester}was approved")
wait_for_messages_with_subjects(smtp_test, {approved_subject, approved_from}, num_sec=90)
smtp_test.clear_database()
# Wait for the VM to appear on the provider backend before proceeding
# to ensure proper cleanup
logger.info('Waiting for vms %s to appear on provider %s',
", ".join(approved_vm_names), provider.key)
wait_for(
lambda: all(map(provider.mgmt.does_vm_exist, approved_vm_names)),
handle_exception=True,
num_sec=600
)
provision_request.wait_for_request(method='ui')
msg = f"Provisioning failed with the message {provision_request.row.last_message.text}."
assert provision_request.is_succeeded(method='ui'), msg
# account for multiple vms, specific names
completed_subjects = {
normalize_text(f"your {vm_type} request has completed vm name {name}")
for name in approved_vm_names
}
wait_for_messages_with_subjects(smtp_test, completed_subjects, num_sec=90)
def wait_for_messages_with_subjects(smtp_test, expected_subjects_substrings, num_sec):
""" This waits for all the expected subjects to be present the list of received
mails with partial match.
"""
expected_subjects_substrings = set(expected_subjects_substrings)
def _check_subjects():
subjects = {normalize_text(m["subject"]) for m in smtp_test.get_emails()}
found_subjects_substrings = set()
# Looking for each expected subject in the list of received subjects with partial match
for expected_substring in expected_subjects_substrings:
for subject in subjects:
if expected_substring in subject:
found_subjects_substrings.add(expected_substring)
break
else:
logger.info('No emails with subjects containing "%s" found.', expected_substring)
if expected_subjects_substrings - found_subjects_substrings:
return False
logger.info('Found all expected emails.')
return True
wait_for(_check_subjects, num_sec=num_sec, delay=3,
message='Some expected subjects not found in the received emails subjects.')
def multichecker_factory(all_checkers: typing.Iterable[Checker]) -> Checker:
all_checkers = tuple(all_checkers)
def _item_checker(item):
logger.debug(f'Checking: {item}')
for checker in all_checkers:
if not checker(item):
logger.debug(f'Failure from checker: {checker}.')
return False
else:
logger.debug(f'Success from checker: {checker}.')
return True
return _item_checker
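# Example usage (illustrative only; the lambdas are placeholder checkers,
# not ones used elsewhere in this module):
#
#     accept = multichecker_factory([
#         lambda eml: 'approved' in normalize_text(eml['subject']),
#         lambda eml: bool(eml.get_all('To')),
#     ])
#     accept(some_email_message)  # True only if every checker passes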
class AddressHeaderChecker:
def __init__(self, example: EmailMessage, checked_field: str):
self.checked_field = checked_field
self.example_values = self.normalized_field_vals(example)
def normalized_field_vals(self, eml: EmailMessage):
addresses = eml.get_all(self.checked_field)
assert addresses
return {a[1] for a in email.utils.getaddresses(addresses)}
def __call__(self, received_email: EmailMessage) -> bool:
found_values = self.normalized_field_vals(received_email)
if found_values != self.example_values:
logger.debug(f"Field {self.checked_field} values {found_values} "
f"differ from the expected {self.example_values}.")
return False
return True
def __str__(self):
return f'{self.__class__.__name__}<{self.checked_field}, {self.example_values}>'
# Note the Bcc is not present in the received email. It is a part of rcpttos.
ADDRESS_FIELDS = "From To Cc rcpttos".split()
def wait_for_expected_email_arrived(smtp, subject, example, num_sec, delay):
eml_checker = (multichecker_factory(AddressHeaderChecker(example, field)
for field in ADDRESS_FIELDS))
def _email_message_with_rcpttos_header(eml):
msg = email.message_from_string(eml['data'], policy=email.policy.strict)
msg.add_header('rcpttos', ', '.join(eml['rcpttos']))
return msg
def _expected_email_arrived():
emails = smtp.get_emails()
messages = (_email_message_with_rcpttos_header(m)
for m in emails if subject in normalize_text(m['subject']))
return all(eml_checker(m) for m in messages)
wait_for(_expected_email_arrived, num_sec=num_sec, delay=delay)
@pytest.fixture(scope='module')
def email_addresses_configuration(request, domain):
original_instance = (
domain.appliance.collections.domains.instantiate("ManageIQ")
.namespaces.instantiate("Configuration")
.classes.instantiate("Email")
.instances.instantiate("Default")
)
original_instance.copy_to(domain=domain)
email_configuration = (
domain.namespaces.instantiate('Configuration')
.classes.instantiate('Email')
.instances.instantiate('Default')
)
test_data = {
'default_recipient': ('default_recipient@example.com',),
'approver': ('approver@example.com',),
'cc': ('first-cc@example.com', 'second-cc@example.com',),
'bcc': ('first-bcc@example.com', 'second-bcc@example.com'),
'from': ('from@example.com',),
}
with update(email_configuration):
email_configuration.fields = {k: {'value': ', '.join(v)} for k, v in test_data.items()}
request.addfinalizer(email_configuration.delete_if_exists)
yield test_data
@pytest.mark.meta(automates=[1472844, 1676910, 1818172, 1380197, 1688500, 1702304, 1783511,
GH(('ManageIQ/manageiq', 20260))])
@pytest.mark.parametrize("action", ["edit", "approve", "deny"])
def test_provision_approval(appliance, provider, vm_name, smtp_test, request,
action, soft_assert, email_addresses_configuration):
""" Tests provisioning approval. Tests couple of things.
* Approve manually
* Approve by editing the request to conform
Prerequisites:
* A provider that can provision.
* Automate role enabled
* User with e-mail set so you can receive and view them
Steps:
* Create a provisioning request that does not get automatically approved (eg. ``num_vms``
bigger than 1)
* Wait for an e-mail to come, informing you that approval is pending
* Depending on whether you want to do:
* approve: manually approve the request in UI
* edit: Edit the request in UI so it conforms the rules for auto-approval.
* deny: Deny the request in UI.
* Wait for an e-mail with approval
* Wait until the request finishes
* Wait until an email with provisioning complete
Metadata:
test_flag: provision
suite: infra_provisioning
Polarion:
assignee: jhenner
caseimportance: high
casecomponent: Provisioning
initialEstimate: 1/8h
Bugzilla:
1472844
1676910
1380197
1818172
"""
# generate_tests makes sure these have values
# template, host, datastore = map(provisioning.get, ('template', 'host', 'datastore'))
# It will provision two of them
# All the subject checks are normalized, because of newlines and capitalization
vm_names = [vm_name + "001", vm_name + "002"]
requester = "vm_provision@cfmeqe.com " # include trailing space for clean formatting
if provider.one_of(CloudProvider):
vm_type = "instance"
else:
vm_type = "virtual machine"
collection = appliance.provider_based_collection(provider)
inst_args = {
'catalog': {
'vm_name': vm_name,
'num_vms': '2'
}
}
vm = collection.create(vm_name, provider, form_values=inst_args, wait=False)
pending_subject = normalize_text(f"your {vm_type} request is pending")
# requester includes the trailing space
pending_from = normalize_text(f"{vm_type} request from {requester}pending approval")
def msg_from_dict(msg_dict) -> EmailMessage:
to, = (msg_dict['default_recipient']
if GH(('ManageIQ/manageiq', 20260)).blocks
else msg_dict['approver'])
msg = EmailMessage()
msg.add_header('from', ', '.join(msg_dict['from']))
msg.add_header('cc', ', '.join(msg_dict['cc']))
msg.add_header('to', to)
msg.add_header('rcpttos', ', '.join(chain(msg_dict['cc'], msg_dict['bcc'], [to])))
return msg
wait_for_messages_with_subjects(smtp_test, {pending_subject, pending_from}, num_sec=90)
SUBJ_APPR_PENDING = normalize_text(f'Instance Request from {requester} Pending Approval')
wait_for_expected_email_arrived(smtp_test, SUBJ_APPR_PENDING,
msg_from_dict(email_addresses_configuration), num_sec=1, delay=0)
smtp_test.clear_database()
cells = {'Description': f'Provision from [{vm.template_name}] to [{vm.name}###]'}
def _action_edit():
# Automatic approval after editing the request to conform
new_vm_name = f'{vm_name}-xx'
modifications = {
'catalog': {
'num_vms': "1",
'vm_name': new_vm_name
},
'Description': f'Provision from [{vm.template_name}] to [{new_vm_name}]'
}
provision_request = appliance.collections.requests.instantiate(cells=cells)
provision_request.edit_request(values=modifications)
vm_names = [new_vm_name] # Will be just one at this moment
request.addfinalizer(
lambda: collection.instantiate(new_vm_name, provider).cleanup_on_provider()
)
_post_approval(smtp_test, provision_request, vm_type, requester, provider, vm_names)
def _action_approve():
# Manual approval
provision_request = appliance.collections.requests.instantiate(cells=cells)
provision_request.approve_request(method='ui', reason="Approved")
for v_name in vm_names:
request.addfinalizer(
lambda: (appliance.collections.infra_vms.instantiate(v_name, provider)
.cleanup_on_provider()))
_post_approval(smtp_test, provision_request, vm_type, requester, provider, vm_names)
def _action_deny():
provision_request = appliance.collections.requests.instantiate(cells=cells)
provision_request.deny_request(method='ui', reason="You stink!")
denied_subject = normalize_text(f"your {vm_type} request was denied")
denied_from = normalize_text(f"{vm_type} request from {requester}was denied")
wait_for_messages_with_subjects(smtp_test, [denied_subject, denied_from], num_sec=90)
# Call function doing what is necessary -- Variation of Strategy design pattern.
action_callable = locals().get(f'_action_{action}')
if not action_callable:
raise NotImplementedError(f'Action {action} is not known to this test.')
action_callable()
@test_requirements.rest
@pytest.mark.parametrize('auto', [True, False], ids=["Auto", "Manual"])
@pytest.mark.meta(blockers=[
BZ(1720751, unblock=lambda provider: not provider.one_of(SCVMMProvider))
])
def test_provision_from_template_using_rest(appliance, request, provider, vm_name, auto):
""" Tests provisioning from a template using the REST API.
Metadata:
test_flag: provision, rest
Polarion:
assignee: pvala
casecomponent: Provisioning
caseimportance: high
initialEstimate: 1/30h
"""
if auto:
form_values = {"vm_fields": {"placement_auto": True}}
else:
form_values = None
collection = appliance.provider_based_collection(provider)
instance = collection.create_rest(vm_name, provider, form_values=form_values)
wait_for(
lambda: instance.exists,
num_sec=1000, delay=5, message=f"VM {vm_name} becomes visible"
)
@request.addfinalizer
def _cleanup():
logger.info('Instance cleanup, deleting %s', instance.name)
try:
instance.cleanup_on_provider()
except Exception as ex:
logger.warning('Exception while deleting instance fixture, continuing: %s', ex)
@pytest.fixture(scope="module")
def original_request_class(appliance):
return (appliance.collections.domains.instantiate(name='ManageIQ')
.namespaces.instantiate(name='Cloud')
.namespaces.instantiate(name='VM')
.namespaces.instantiate(name='Provisioning')
.namespaces.instantiate(name='StateMachines')
.classes.instantiate(name='Methods'))
@pytest.fixture(scope="module")
def modified_request_class(request, domain, original_request_class):
original_request_class.copy_to(domain)
klass = (domain
.namespaces.instantiate(name='Cloud')
.namespaces.instantiate(name='VM')
.namespaces.instantiate(name='Provisioning')
.namespaces.instantiate(name='StateMachines')
.classes.instantiate(name='Methods'))
request.addfinalizer(klass.delete_if_exists)
return klass
@pytest.fixture(scope="module")
def copy_domains(original_request_class, domain):
methods = ['openstack_PreProvision', 'openstack_CustomizeRequest']
for method in methods:
original_request_class.methods.instantiate(name=method).copy_to(domain)
# Not collected for EC2 in generate_tests above
@pytest.mark.meta(automates=[BZ(1713632)])
@pytest.mark.parametrize("disks", [1, 2])
@pytest.mark.provider([OpenStackProvider], required_fields=[['provisioning', 'image']])
def test_cloud_provision_from_template_with_attached_disks(
appliance, request, instance_args, provider, disks, soft_assert, domain,
modified_request_class, copy_domains, provisioning):
""" Tests provisioning from a template and attaching disks
Metadata:
test_flag: provision
Polarion:
assignee: jhenner
caseimportance: high
casecomponent: Provisioning
initialEstimate: 1/4h
Bugzilla:
1713632
"""
vm_name, inst_args = instance_args
# Modify availability_zone for Azure provider
if provider.one_of(AzureProvider):
recursive_update(inst_args, {'environment': {'availability_zone': provisioning("av_set")}})
device_name = "vd{}"
device_mapping = []
volumes = provider.mgmt.volume_configurations(1, n=disks)
@request.addfinalizer
def delete_volumes():
for volume in volumes:
provider.mgmt.delete_volume(volume)
# Set up automate
for i, volume in enumerate(volumes, 0):
# note: boot_index specifies the order in which the disks are tried as boot
# devices. The value -1 means "never".
device_mapping.append(
{'boot_index': 0 if i == 0 else -1,
'uuid': volume,
'device_name': device_name.format(chr(ord("a") + i))})
if i == 0:
provider.mgmt.capi.volumes.set_bootable(volume, True)
method = modified_request_class.methods.instantiate(name="openstack_PreProvision")
view = navigate_to(method, 'Details')
former_method_script = view.script.get_value()
disk_mapping = []
for mapping in device_mapping:
one_field = dedent("""{{
:boot_index => {boot_index},
:uuid => "{uuid}",
:device_name => "{device_name}",
:source_type => "volume",
:destination_type => "volume",
:volume_size => 1,
:delete_on_termination => false
}}""")
disk_mapping.append(one_field.format(**mapping))
volume_method = dedent("""
clone_options = {{
:image_ref => nil,
:block_device_mapping_v2 => [
{}
]
}}
prov = $evm.root["miq_provision"]
prov.set_option(:clone_options, clone_options)
""")
with update(method):
method.script = volume_method.format(",\n".join(disk_mapping))
@request.addfinalizer
def _finish_method():
with update(method):
method.script = former_method_script
instance = appliance.collections.cloud_instances.create(vm_name,
provider,
form_values=inst_args)
@request.addfinalizer
def delete_vm_and_wait_for_gone():
instance.cleanup_on_provider()
wait_for(lambda: not instance.exists_on_provider, num_sec=180, delay=5)
for volume_id in volumes:
attachments = provider.mgmt.volume_attachments(volume_id)
soft_assert(
vm_name in attachments,
'The vm {} not found among the attachments of volume {}: {}'.format(
vm_name, volume_id, attachments))
for device in device_mapping:
provider_devpath = provider.mgmt.volume_attachments(device['uuid'])[vm_name]
expected_devpath = '/dev/{}'.format(device['device_name'])
soft_assert(
provider_devpath == expected_devpath,
'Device {} is not attached to expected path: {} but to: {}'.format(
device['uuid'], expected_devpath, provider_devpath))
# Not collected for EC2 in generate_tests above
@pytest.mark.meta(blockers=[BZ(1746931)])
@pytest.mark.provider([OpenStackProvider], required_fields=[['provisioning', 'image']])
def test_provision_with_boot_volume(request, instance_args, provider, soft_assert,
modified_request_class, appliance, copy_domains):
""" Tests provisioning from a template and attaching one booting volume.
Metadata:
test_flag: provision, volumes
Polarion:
assignee: jhenner
caseimportance: high
casecomponent: Provisioning
initialEstimate: 1/4h
"""
vm_name, inst_args = instance_args
image = inst_args.get('template_name')
volume = provider.mgmt.create_volume(1, imageRef=provider.mgmt.get_template(image).uuid)
request.addfinalizer(lambda: provider.mgmt.delete_volume(volume))
# Set up automate
method = modified_request_class.methods.instantiate(name="openstack_CustomizeRequest")
view = navigate_to(method, 'Details')
former_method_script = view.script.get_value()
with update(method):
method.script = dedent('''\
$evm.root["miq_provision"].set_option(
:clone_options, {{
:image_ref => nil,
:block_device_mapping_v2 => [{{
:boot_index => 0,
:uuid => "{}",
:device_name => "vda",
:source_type => "volume",
:destination_type => "volume",
:volume_size => 1,
:delete_on_termination => false
}}]
}}
)
'''.format(volume))
@request.addfinalizer
def _finish_method():
with update(method):
method.script = former_method_script
instance = appliance.collections.cloud_instances.create(vm_name,
provider,
form_values=inst_args)
@request.addfinalizer
def delete_vm_and_wait_for_gone():
instance.cleanup_on_provider() # To make it possible to delete the volume
wait_for(lambda: not instance.exists_on_provider, num_sec=180, delay=5)
request_description = f'Provision from [{image}] to [{instance.name}]'
provision_request = appliance.collections.requests.instantiate(request_description)
provision_request.wait_for_request(method='ui')
msg = "Provisioning failed with the message {}".format(
provision_request.row.last_message.text)
assert provision_request.is_succeeded(method='ui'), msg
soft_assert(instance.name in provider.mgmt.volume_attachments(volume))
soft_assert(provider.mgmt.volume_attachments(volume)[instance.name] == "/dev/vda")
# Not collected for EC2 in generate_tests above
@pytest.mark.meta(blockers=[BZ(1746931)])
@pytest.mark.provider([OpenStackProvider], required_fields=[['provisioning', 'image']])
def test_provision_with_additional_volume(request, instance_args, provider, small_template,
soft_assert, modified_request_class, appliance,
copy_domains):
""" Tests provisioning with setting specific image from AE and then also making it create and
attach an additional 3G volume.
Metadata:
test_flag: provision, volumes
Polarion:
assignee: jhenner
caseimportance: high
casecomponent: Provisioning
initialEstimate: 1/4h
"""
vm_name, inst_args = instance_args
# Set up automate
method = modified_request_class.methods.instantiate(name="openstack_CustomizeRequest")
try:
image_id = provider.mgmt.get_template(small_template.name).uuid
except KeyError:
pytest.skip("No small_template in provider data!")
view = navigate_to(method, 'Details')
former_method_script = view.script.get_value()
with update(method):
method.script = dedent('''\
$evm.root["miq_provision"].set_option(
:clone_options, {{
:image_ref => nil,
:block_device_mapping_v2 => [{{
:boot_index => 0,
:uuid => "{}",
:device_name => "vda",
:source_type => "image",
:destination_type => "volume",
:volume_size => 3,
:delete_on_termination => false
}}]
}}
)
'''.format(image_id))
@request.addfinalizer
def _finish_method():
with update(method):
method.script = former_method_script
def cleanup_and_wait_for_instance_gone():
instance.mgmt.refresh()
prov_instance_raw = instance.mgmt.raw
instance_volumes = getattr(prov_instance_raw, 'os-extended-volumes:volumes_attached')
instance.cleanup_on_provider()
wait_for(lambda: not instance.exists_on_provider, num_sec=180, delay=5)
# Delete the volumes.
for volume in instance_volumes:
provider.mgmt.delete_volume(volume['id'])
instance = appliance.collections.cloud_instances.create(
vm_name, provider, form_values=inst_args)
request.addfinalizer(cleanup_and_wait_for_instance_gone)
request_description = f'Provision from [{small_template.name}] to [{instance.name}]'
provision_request = appliance.collections.requests.instantiate(request_description)
try:
provision_request.wait_for_request(method='ui')
except Exception as e:
logger.info(
f"Provision failed {e}: {provision_request.request_state}")
raise
assert provision_request.is_succeeded(method='ui'), (
"Provisioning failed with the message {}".format(
provision_request.row.last_message.text))
instance.mgmt.refresh()
prov_instance_raw = instance.mgmt.raw
assert hasattr(prov_instance_raw, 'os-extended-volumes:volumes_attached')
volumes_attached = getattr(prov_instance_raw, 'os-extended-volumes:volumes_attached')
assert len(volumes_attached) == 1
volume_id = volumes_attached[0]["id"]
assert provider.mgmt.volume_exists(volume_id)
volume = provider.mgmt.get_volume(volume_id)
assert volume.size == 3
@test_requirements.tag
def test_provision_with_tag(appliance, vm_name, tag, provider, request):
""" Tests tagging instance using provisioning dialogs.
Steps:
* Open the provisioning dialog.
* Apart from the usual provisioning settings, pick a tag.
* Submit the provisioning request and wait for it to finish.
* Visit instance page, it should display the selected tags
Metadata:
test_flag: provision
Polarion:
assignee: anikifor
casecomponent: Tagging
initialEstimate: 1/4h
"""
inst_args = {'purpose': {
'apply_tags': Check_tree.CheckNode(
[f'{tag.category.display_name} *', tag.display_name])}}
collection = appliance.provider_based_collection(provider)
instance = collection.create(vm_name, provider, form_values=inst_args)
request.addfinalizer(instance.cleanup_on_provider)
assert tag in instance.get_tags(), 'Provisioned instance does not have expected tag'
@pytest.mark.tier(2)
@test_requirements.multi_region
@test_requirements.provision
@pytest.mark.long_running
def test_provision_from_template_from_global_region(setup_multi_region_cluster,
multi_region_cluster,
activate_global_appliance,
setup_remote_provider,
provisioned_instance):
"""
Polarion:
assignee: tpapaioa
caseimportance: high
casecomponent: Provisioning
initialEstimate: 1/10h
"""
assert provisioned_instance.exists_on_provider, "Instance wasn't provisioned successfully"
@pytest.mark.manual
@pytest.mark.meta(coverage=[1670327])
def test_provision_service_dialog_details():
""" Test whether the details of provision request can be displayed.
Prerequisites:
* Local/Global replicated CFME appliances.
* A provider that can provision.
Steps:
* Add repository and create a service catalog with a dialog at remote region
* Try provisioning the catalog from Global Region
* You can see the dialog details in Services -> Requests page
Expected results:
The dialog details at Services -> Requests should be displayed when
ordering the catalog from the Global Region
Polarion:
assignee: jhenner
caseimportance: medium
casecomponent: Provisioning
initialEstimate: 1/6h
"""
pass
|
nachandr/cfme_tests
|
cfme/tests/cloud_infra_common/test_provisioning.py
|
Python
|
gpl-2.0
| 31,446
|
[
"VisIt"
] |
7bfac3137746887a6cc62903737debb677a5c28931d4abdebcce0ac2603a4acb
|
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API defaults to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s),
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size, since many of the `Provider`s will share a `State`.
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import vericred_client
from vericred_client.rest import ApiException
from vericred_client.models.zip_county import ZipCounty
class TestZipCounty(unittest.TestCase):
""" ZipCounty unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testZipCounty(self):
"""
Test ZipCounty
"""
model = vericred_client.models.zip_county.ZipCounty()
if __name__ == '__main__':
unittest.main()
|
vericred/vericred-python
|
test/test_zip_county.py
|
Python
|
apache-2.0
| 9,981
|
[
"VisIt"
] |
86ed183e6c519289a4988b7f891d103097d8b2c0c30e119a4168f707378dff80
|
#!/usr/bin/env python
"""
Michael Lerner's hbond analysis, modified by Steve Spronk
Right now, just handles pasting together ptraj output.
"""
import copy,pprint,os,sys
from scipy import sqrt
from string import ascii_letters
from .hbond_tool_utils import *
class Atom:
def __init__(self, atom_name = None, resi_name = None, resi_num = None):
"""
Solvent atoms will have atom OW or HW and resi WAT.
"""
self.atom_name = atom_name
self.resi_name = resi_name
self.resi_num = resi_num
def __eq__(self, other):
return self.atom_name == other.atom_name and \
self.resi_name == other.resi_name and \
self.resi_num == other.resi_num
def __ne__(self, other):
return not (self == other)
class HBond:
"""
Class to provide a mechanism for handing data contained in the output
from ptraj
"""
# ---------------
# Initializations
# ---------------
def __init__(self, line = None, segment_size = 1000, resi_map = None):
'''
Initialize ourself from a line that looks like this:
DONOR ACCEPTORH ACCEPTOR
atom# :res@atom atom# :res@atom atom# :res@atom %occupied distance angle lifetime maxocc
| 2546 :160@OA23| 1018 :63@HG 1017 :63@OG | 99.50 2.641 ( 0.10) 20.89 ( 9.75) 100.0 ( 47.0) 147 |@@@*@@@@@|
| 2545 :160@OA22| 705 :44@HH22 703 :44@NH2 | 98.51 2.756 (10.09) 17.97 (19.79) 99.0 (127.0) 126 |*@@@*@@@@|
| solvent donor | 127 :9@HD21 126 :9@ND2 | 2.00 3.193 ( 0.00) 46.59 ( 0.01) 2.0 ( 0.0) 1 | . |
| 5612 :361@OG | solvent acceptor | 2.00 2.915 ( 0.00) 11.31 ( 0.00) 2.0 ( 0.0) 1 | . |
The numbers in parentheses are standard deviations.
Here is a note from cheatham (http://amber.scripps.edu/Questions/mail/322.html)::
The maxocc is the maximum number of consecutive frames that the
interaction is found in the trajectory (i.e. 39 consecutive frames).
The lifetime is the average time an interaction occurred...
For example, assume that each space below represents 1ps and a star
means it is occupied:
10 20 30 40 50
***** ***** ********** *****|
The occupancy would be 5 + 5 + 10 + 5 / 50 or 50%
The maxocc would be 10
The lifetime would be 5 + 5 + 10 + 5 / 4 = 6.25 ps (assuming 1 ps between
frames; the time per frame can be specified on the hbond command line)
Adding hbonds only works for some attributes (occupancy, distance, distance
standard deviation, angle, angle standard deviation, and graph).
But because we have split the trajectory into segments, the lifetime and maxocc
are not truly a reflection of the H-bonds across the whole trajectory.
Therefore the manipulation of lifetime and maxocc data is not implemented
in the current version of hbond_analysis.
'''
# num_frames tells us how many frames have been added together.
self.num_frames = segment_size
if line is None:
self.donor = Atom()
self.acceptorh = Atom()
self.acceptor = Atom()
self.occ_pct = self.occ_num = self.dist = self.dist_stdev = self.angle = self.angle_stdev = 0.0
self.graph = ' '
return
line = line.strip()
try:
leading_junk, donor, acceptor, stats, graph, trailing_junk = line.split('|')
except ValueError:
print("Could not hbond", line)
raise
# Parse line:
self.donor = self._ptraj_hbond_chunk_to_atom(donor, resi_map)
self.acceptorh = self._ptraj_hbond_chunk_to_atom(' '.join(acceptor.split()[:2]), resi_map)
self.acceptor = self._ptraj_hbond_chunk_to_atom(' '.join(acceptor.split()[2:]), resi_map)
occ_pct,dist = stats.split('(')[0].strip().split()
dist_stdev = stats.split('(')[1].split(')')[0].strip()
angle = stats.split(')')[1].split('(')[0].strip()
angle_stdev = stats.split('(')[2].split(')')[0].strip()
# Make necessary type adjustments and calculations
self.occ_pct,self.dist,self.dist_stdev,self.angle,self.angle_stdev = [float(i) for i in (occ_pct,dist,dist_stdev,angle,angle_stdev)]
self.graph = graph
self.occ_num = int(round(self.occ_pct / 100.0 * self.num_frames))
if self.occ_num < 2:
self.dist_stdev = self.angle_stdev = 0.0
self.straight_from_ptraj = True
def _ptraj_hbond_chunk_to_atom(self, chunk, resi_map = None):
''' chunk is something like " 2546 :160@OA23 " '''
if chunk.strip() in ('solvent donor', ''):
return Atom(atom_name = 'OW', resi_name = 'Wat', resi_num = 999999)
elif chunk.strip() == 'solvent acceptor':
return Atom(atom_name = 'HW', resi_name = 'Wat', resi_num = 999999)
else:
resi_name = chunk.split(':')[1].split('@')[0].strip()
if resi_map is not None:
resi_name = resi_map[int(resi_name)]
try:
resi_num = int(resi_name) # no aa code
except ValueError:
if resi_name[1] in ascii_letters: # 3-letter aa code
resi_num = int(resi_name[3:])
else: # 1-letter aa code
resi_num = int(resi_name[1:])
else:
resi_num = int(resi_name)
atom_name = chunk.split(':')[1].split('@')[1].strip()
return Atom(atom_name, resi_name, resi_num)
def init_from_atomstr(self, s, segment_size = 1000):
'''
atomstr looks like what is returned by self._atom_str:
102 NH1--HH11 ... O 88
Tyr71 OH--HH ... OG1 Asp228
'''
a_resi, a_atom, ah_atom, dots, d_atom, d_resi = s.replace('--',' ').split()
if a_resi == 'Wat':
a_resi_num = 999999
else:
try:
a_resi_num = int(a_resi) # no aa code
except ValueError:
if a_resi[1] in ascii_letters: # 3-letter aa code
a_resi_num = int(a_resi[3:])
else: # 1-letter aa code
a_resi_num = int(a_resi[1:])
if d_resi == 'Wat': # Same for donor atom
d_resi_num = 999999
else:
try:
d_resi_num = int(d_resi)
except ValueError:
if d_resi[1] in ascii_letters:
d_resi_num = int(d_resi[3:])
else:
d_resi_num = int(d_resi[1:])
self.donor = Atom( d_atom, d_resi, d_resi_num)
self.acceptor = Atom( a_atom, a_resi, a_resi_num)
self.acceptorh = Atom(ah_atom, a_resi, a_resi_num) # H is always in same residue as heavy atom it's bonded to
self.num_frames = segment_size
self.occ_num = 0
self.occ_pct = self.dist = self.dist_stdev = self.angle = self.angle_stdev = 0.0
self.graph = ' '
self.straight_from_ptraj = True
def init_from_str(self, s):
"""
str looks like what is output by __str__ :
Lys142 NZ--HZ3 ... OE1 Glu134 27.80( 2500) |--... .*-.|oo---x- - |
or what is output by _attr_str:
Lys142 NZ--HZ3 ... OE1 Glu134 27.80( 2500) 2.850(0.17) 29.14(15.66) |--... .*-.|oo---x- - |
"""
atom_str_len = 34
hbond_name = s[:atom_str_len].strip()
hbond_attr = s[atom_str_len:].strip()
# Take care of Atoms first
self.init_from_atomstr(hbond_name)
# Then take care of attributes
try:
attr_list = hbond_attr.split(')')
# Attributes from __str__
self.occ_pct = float(attr_list[0].split('(')[0])
self.num_frames = int(attr_list[0].split('(')[1])
self.graph = attr_list[-1].strip()[1:-1] # The [1:-1] takes care of leading and trailing '|'
# If present, attributes from _attr_str
attr_list = attr_list[1:-1]
if attr_list != []:
self.dist = float(attr_list[0].split('(')[0])
self.dist_stdev = float(attr_list[0].split('(')[1])
self.angle = float(attr_list[1].split('(')[0])
self.angle_stdev = float(attr_list[1].split('(')[1])
except:
print("String could not be converted to hbond:", s)
raise
self.occ_num = int(round(self.occ_pct / 100.0 * self.num_frames))
self.straight_from_ptraj = False
# ---------------
# Representations
# ---------------
def __str__(self):
return self._atom_str() + ' ' + self._occ_graph_str()
def _atom_str(self):
"""
Returns the atoms identifying the Hbond as a formatted string.
Examples:
102 NH1--HH11 ... O 88
Tyr71 OH--HH ... OG1 Asp228
"""
spaces = (7 - len(self.acceptor.resi_name)) * ' '
bond_string = spaces + self.acceptor.resi_name
acceptor_str = "%s--%s"%(self.acceptor.atom_name,
self.acceptorh.atom_name)
spaces = (10 - len(acceptor_str)) * ' '
bond_string += spaces + acceptor_str + " ... "
spaces = (5 - len(self.donor.atom_name)) * ' '
bond_string += self.donor.atom_name + spaces
spaces = (7 - len(self.donor.resi_name)) * ' '
return bond_string + self.donor.resi_name + spaces
def _attr_str(self):
"""
Returns the attributes in a formatted string.
"""
return "%6.2f(%5s)%6.3f(%4.2f)%6.2f(%5.2f) |%s|"\
%(self.occ_pct, self.num_frames, self.dist, self.dist_stdev,
self.angle, self.angle_stdev, self.graph,)
def _occ_graph_str(self):
"""
Returns the occupancy, count, and graph in a formatted string.
"""
return "%6.2f(%5s) |%s|"%(self.occ_pct, self.num_frames, self.graph)
__repr__ = __str__
# ----------
# Operations
# ----------
def __add__(self,other):
"""
Combines the statistics of two hbonds. The new number of frames, number of
occupied frames, occupancy percentage, distance, angle, distance standard
deviation, angle standard deviation, and graph are all accurately calculated.
A note on the standard deviation calculations: ptraj calculates sigma as the
standard deviation (which has N in the denominator). This is not strictly
correct, as this formula only holds true if we know all of the data. However,
we know that our data only contains a sampling from the actual ensemble, so
we should use the estimated population standard deviation (S) of the
statistics, which has N-1 in the denominator of the calculation.
"""
if not isinstance(self, type(other)):
raise Exception('Cannot add hbond to non-hbond %s object: %s'%(type(other),other))
if self._atom_str() != other._atom_str():
raise Exception('Can only add hbonds with the same donors and acceptors\n' \
'%s != %s'%(self._atom_str(),other._atom_str()))
result = HBond()
result.donor = Atom(self.donor.atom_name, self.donor.resi_name, self.donor.resi_num)
result.acceptor = Atom(self.acceptor.atom_name, self.acceptor.resi_name, self.acceptor.resi_num)
result.acceptorh = Atom(self.acceptorh.atom_name, self.acceptor.resi_name, self.acceptor.resi_num)
result.num_frames = self.num_frames + other.num_frames
sep = '|'
result.graph = self.graph + sep + other.graph
result.occ_num = self.occ_num + other.occ_num
result.occ_pct = result.occ_num * 100.0 / result.num_frames
result.straight_from_ptraj = False
if result.occ_num > 0:
result.dist = (self.occ_num * self.dist + other.occ_num * other.dist ) / result.occ_num
result.angle = (self.occ_num * self.angle + other.occ_num * other.angle) / result.occ_num
# It's relatively complicated to calculate the new standard deviation. See my Notebook 3,
# pp. 72-4 for the derivation. We must make a distinction on whether or not the data is
# straight from the ptraj files, because when we are looking at the data from ptraj
# (straight_from_ptraj = True) the std. dev. is actually sigma as opposed to S, the estimated
# standard deviation of the population. In practice, these values are close (for relatively
# large N), but I want to be precise with my statistics.
if result.occ_num == 1:
result.dist_stdev = result.angle_stdev = 0.0
else:
dist_sumsq = angle_sumsq = 0.0
if self.straight_from_ptraj:
dist_sumsq += self.dist_stdev * self.dist_stdev * self.occ_num + \
self.dist * self.dist * self.occ_num
angle_sumsq += self.angle_stdev * self.angle_stdev * self.occ_num + \
self.angle * self.angle * self.occ_num
else:
dist_sumsq += self.dist_stdev * self.dist_stdev * (self.occ_num - 1) + \
self.dist * self.dist * self.occ_num
angle_sumsq += self.angle_stdev * self.angle_stdev * (self.occ_num - 1) + \
self.angle * self.angle * self.occ_num
if other.straight_from_ptraj:
dist_sumsq += other.dist_stdev * other.dist_stdev * other.occ_num + \
other.dist * other.dist * other.occ_num
angle_sumsq += other.angle_stdev * other.angle_stdev * other.occ_num + \
other.angle * other.angle * other.occ_num
else:
dist_sumsq += other.dist_stdev * other.dist_stdev * (other.occ_num - 1) + \
other.dist * other.dist * other.occ_num
angle_sumsq += other.angle_stdev * other.angle_stdev * (other.occ_num - 1) + \
other.angle * other.angle * other.occ_num
result.dist_stdev = sqrt((dist_sumsq - result.occ_num*result.dist *result.dist ) / (result.occ_num - 1))
result.angle_stdev = sqrt((angle_sumsq - result.occ_num*result.angle *result.angle ) / (result.occ_num - 1))
#else:
# result.dist = result.dist_stdev = result.angle = result.angle_stdev = 0.0
return result
def compress_graph(self):
"""
Compresses the graph of a trajectory into one half the size.
Each pair of characters is replaced by a single character
that is representative of the percentage of occupancy for
the union of the two segments. Unfortunately, the actual
occupancy percentage of the union can not be absolutely
determined from the two symbols of the graph, so the new
graph may not be precise. See my Notebook 3, pp. 78-79
for a detailed analysis of how I determined how two
symbols should be combined.
"""
graph_sections = self.graph.split('|')
new_graph = ''
for graph_num in range(len(graph_sections)):
for i in range(0, 10, 2):
pair = graph_sections[graph_num][i:i+2]
if pair[0] == pair[1]:
new_graph += pair[0]
elif pair == ' .' or pair == '. ':
new_graph += '.'
elif pair == ' -' or pair == '- ':
new_graph += '.'
elif pair == ' o' or pair == 'o ':
new_graph += '-'
elif pair == ' x' or pair == 'x ':
new_graph += '-'
elif pair == ' *' or pair == '* ':
new_graph += 'o'
elif pair == ' @' or pair == '@ ':
new_graph += 'o'
elif pair == '.-' or pair == '-.':
new_graph += '-'
elif pair == '.o' or pair == 'o.':
new_graph += '-'
elif pair == '.x' or pair == 'x.':
new_graph += 'o'
elif pair == '.*' or pair == '*.':
new_graph += 'o'
elif pair == '.@' or pair == '@.':
new_graph += 'o'
elif pair == '-o' or pair == 'o-':
new_graph += 'o'
elif pair == '-x' or pair == 'x-':
new_graph += 'o'
elif pair == '-*' or pair == '*-':
new_graph += 'o'
elif pair == '-@' or pair == '@-':
new_graph += 'x'
elif pair == 'ox' or pair == 'xo':
new_graph += 'o'
elif pair == 'o*' or pair == '*o':
new_graph += 'x'
elif pair == 'o@' or pair == '@o':
new_graph += 'x'
elif pair == 'x*' or pair == '*x':
new_graph += 'x'
elif pair == 'x@' or pair == '@x':
new_graph += '*'
elif pair == '*@' or pair == '@*':
new_graph += '*'
if graph_num % 2 == 1:
new_graph += '|'
if new_graph[-1] == '|':
self.graph = new_graph[:-1]
else:
self.graph = new_graph
# ------ End class HBond ----
def hbond_lines(lines):
reading = False
for line in lines:
if line.strip() == ' atom# :res@atom atom# :res@atom atom# :res@atom %occupied distance angle lifetime maxocc'.strip():
reading = True
if not reading or line.strip().startswith('atom') or not line.replace('-','').strip():
continue
yield line
def hbonds_from_ptraj(f, segment_size = 1000, resi_map = None):
return [HBond(line, segment_size, resi_map) for line in hbond_lines(f)]
def is_resinum_of_interest(hbond, criteria = ['all']):
"""
Tells us if a hbond has a residue number among those we want to view
"""
if 'all' in criteria:
return True
if hbond.donor.resi_num in criteria or hbond.acceptor.resi_num in criteria:
return True
else:
return False
def is_atom_of_interest(hbond, criteria = ['all']):
"""
Tells us if an hbond has an atom type among those we want to view
"""
if 'all' in criteria:
return True
if 'protein_only' in criteria:
if hbond.donor.atom_name == 'OW' or hbond.acceptor.atom_name == 'OW':
return False
else:
return True
if 'bb_only' in criteria:
if hbond.donor.atom_name == 'O' and hbond.acceptor.atom_name == 'N':
return True
if 'not_bb' in criteria:
if hbond.donor.atom_name != 'O' or hbond.acceptor.atom_name != 'N':
return True
if hbond.donor.atom_name in criteria or \
hbond.acceptor.atom_name in criteria or \
hbond.acceptorh.atom_name in criteria:
return True
else:
return False
def combine_hbonds(hbond_files, segment_size = 1000,
resi_map = None, output_file = None,
resi_criteria = ['all'], atom_criteria = ['all'],
occ_thresh = 0.0, occ_graph_only = False,
hbond_data_dir = None):
"""
Reads through a set of files that have been output by ptraj and compiles
all the data.
hbond_files: the hbond_files output from ptraj to be combined.
segment_size: the number of frames included in each segment of the
trajectory. (default: 1000)
resi_map: a dictionary mapping the name of each residue onto the residue
number. If 'None,' the residue name will simply be the number.
(default: None)
output_file: the name of the output file. If None, the results will be
written to stdout. (default: None)
resi_criteria: a list containing residue number criteria to include in the
output. (default: ['all'])
atom_criteria: a list containing atom name criteria to include in the
output. (default: ['all'])
occ_thresh: the minimum occupancy threshold that the hbonds must have
to be reported. (default: 0.0)
occ_graph_only: if True, only the atom string, occupancy, and graph of
each hbond will be written to output. (default: False)
hbond_data_dir: the directory that contains the hbond data files. If
'None,' the file names will be used without modification, and the
output will be written to the current directory. (default: None)
"""
# Do error checking of file names
files_to_remove = []
for each_file in hbond_files:
if hbond_data_dir != None:
full_file = os.path.join(hbond_data_dir, each_file)
else:
full_file = each_file
if not os.path.exists(full_file):
print('Warning: File ' + full_file + ' does not exist.\n' + \
' Will be ignored.')
files_to_remove.append(each_file)
for each_file in files_to_remove:
hbond_files.remove(each_file)
if len(hbond_files) == 0:
sys.exit('ERROR: No usable input files remain.\n')
# Create list of hbonds in each file, and a master hbond dict
hbonds_from_file = {} # {filename: list of hbond objects}
combined_hbonds = {} # {hbond string: hbond object}
for each_file in hbond_files:
if hbond_data_dir != None:
hbond_file = os.path.join(hbond_data_dir, each_file)
else:
hbond_file = each_file
try:
hbond_f = open(hbond_file)
except IOError:
sys.exit('ERROR: Could not open ' + hbond_file + '.\n')
hbonds_from_file[each_file] = hbonds_from_ptraj(hbond_f, segment_size, resi_map)
for hbond in hbonds_from_file[each_file]:
combined_hbonds[hbond._atom_str()] = None
# Run through the master hbond dict, and find out the missing hbonds
# in each file. If any are missing, create an hbond with no occupancy.
for each_file in hbond_files:
for hbond_str in combined_hbonds:
found = False
for hbond in hbonds_from_file[each_file]:
if hbond._atom_str() == hbond_str:
found = True
break
if not found:
hbond = HBond()
hbond.init_from_atomstr(hbond_str, segment_size)
hbonds_from_file[each_file].append(hbond)
# Do the addition of the hbonds from each file to create the
# final combined hbond object.
for hbond in hbonds_from_file[hbond_files[0]]:
combined_hbonds[hbond._atom_str()] = hbond
for each_file in hbond_files[1:]:
for hbond in hbonds_from_file[each_file]:
combined_hbonds[hbond._atom_str()] = combined_hbonds[hbond._atom_str()] + hbond
# Write output to file or stdout
output = []
for hbond in list(combined_hbonds.values()):
if is_resinum_of_interest(hbond, resi_criteria) and \
is_atom_of_interest(hbond, atom_criteria) and \
hbond.occ_pct > occ_thresh:
if not occ_graph_only:
output.append((hbond.occ_pct, hbond._atom_str() + ' ' + hbond._attr_str()))
else:
output.append((hbond.occ_pct, str(hbond)))
output.sort()
output.reverse()
output = [o[1] for o in output]
output_str = '\n'.join(output)
if hbond_data_dir == None:
output_dir = '.'
else:
output_dir = hbond_data_dir
if output_file == None:
print(output_str)
else:
try:
output_file = os.path.join(output_dir, output_file)
output_f = open(output_file, 'w')
except IOError:
print('Warning: Could not open ' + output_file + '.\n')
print(output_str)
else:
output_f.write(output_str + '\n')
output_f.close()
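# Example call (hypothetical file names; occ_thresh assumes occ_pct is a
# percentage): merge two per-run ptraj tables into one combined table,
# keeping only hbonds above 5% occupancy.
# combine_hbonds(['run1.hbond', 'run2.hbond'], segment_size=1000,
#                output_file='combined.hbond', occ_thresh=5.0)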
def subset_hbonds(hbond_file, output_file = None,
resi_criteria = ['all'], atom_criteria = ['all'],
occ_thresh = 0.0, occ_graph_only = False,
sort = 'occ_pct', compress = False,
hbond_data_dir = None):
"""
Following combination of hbonds by combine_hbond(), this function can be
used to write to stdout or a file only a subset of all the data present.
hbond_file: the hbond file with data to be analyzed.
output_file: the name of the output file. If None, the results will be
written to stdout. (default: None)
resi_criteria: a list containing residue number criteria to include in the
output. (default: ['all'])
atom_criteria: a list containing atom name criteria to include in the
output. (default: ['all'])
occ_thresh: the minimum occupancy threshold that the hbonds must have
to be reported. (default: 0.0)
occ_graph_only: if True, only the atom string, occupancy, and graph of
each hbond will be written to output. (default: False)
sort: one of 'occ_pct', 'donor', 'acceptor', 'dist', or 'angle' that
indicates how to sort the output. (default: occ_pct)
compress: if True, the graphs will be compressed by compress_graph().
(default: False)
hbond_data_dir: the directory that contains the hbond data files. If
'None,' the file names will be used without modification, and the
output will be written to the current directory. (default: None)
"""
# Do error checking of file names.
if not hbond_file:
sys.exit('ERROR: No input file provided.\n')
if isinstance(hbond_file, list):
if len(hbond_file) > 1:
print('Warning: More than 1 input file provided.\n' + \
' Will only use first one: ' + hbond_file[0])
hbond_file = hbond_file[0]
if hbond_data_dir != None:
full_file = os.path.join(hbond_data_dir, hbond_file)
else:
full_file = hbond_file
try:
hbond_f = open(full_file)
except IOError:
sys.exit('ERROR: Could not open ' + full_file + '.\n')
# Create list of hbonds in the input file, check to see if they
# satisfy the necessary criteria for output.
hbond_list = []
for line in hbond_f:
hbond = HBond()
hbond.init_from_str(line)
hbond_list.append(hbond)
output = []
for hbond in hbond_list:
if is_resinum_of_interest(hbond, resi_criteria) and \
is_atom_of_interest(hbond, atom_criteria) and \
hbond.occ_pct > occ_thresh:
if compress:
hbond.compress_graph()
if occ_graph_only:
hbond_str = str(hbond)
else:
hbond_str = hbond._atom_str() + ' ' + hbond._attr_str()
if sort not in 'occ_pct acceptor donor dist angle'.split():
print('Warning: Unknown sorting method: ' + sort + '.\n' + \
' Will sort by occupancy percentage.')
sort = 'occ_pct'
if sort == 'occ_pct':
output.append((hbond.occ_pct,
hbond.acceptor.resi_num,
hbond_str))
elif sort == 'acceptor':
output.append((hbond.acceptor.resi_num,
hbond.acceptor.atom_name,
hbond.donor.resi_num,
hbond.donor.atom_name,
hbond_str))
elif sort == 'donor':
output.append((hbond.donor.resi_num,
hbond.donor.atom_name,
hbond.acceptor.resi_num,
hbond.acceptor.atom_name,
hbond_str))
elif sort == 'dist':
output.append((hbond.dist, hbond_str))
else: # sort must be 'angle'
output.append((hbond.angle, hbond_str))
# Write output
output.sort()
if sort == 'occ_pct':
output.reverse()
output = [o[-1] for o in output]
output_str = '\n'.join(output)
if hbond_data_dir == None:
output_dir = '.'
else:
output_dir = hbond_data_dir
if output_file == None:
print(output_str)
else:
try:
output_file = os.path.join(output_dir, output_file)
output_f = open(output_file, 'w')
except IOError:
print('Warning: Could not open ' + output_file + '.\n')
print(output_str)
else:
output_f.write(output_str + '\n')
output_f.close()
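# Example call (hypothetical file names): from a combined table, write only
# backbone hbonds, sorted by donor residue, with graphs compressed.
# subset_hbonds('combined.hbond', output_file='bb.hbond',
#               atom_criteria=['bb_only'], sort='donor', compress=True)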
def compare_hbonds(hbond_files, identifiers = [], output_file = None,
resi_criteria = ['all'], atom_criteria = ['all'],
occ_thresh = 0.0, occ_graph_only = False,
sort = 'occ_diff', compress = False,
hbond_data_dir = None):
"""
Following combination of hbonds by combine_hbond() for distinct
trajectories, this function can be used to present the data as a
side-by-side comparison of hbond occupancies.
hbond_files: the hbond files with data to be analyzed.
identifiers: the list of names associated with each hbond_file. If
the list is empty, each file will simply be assigned a number.
(default: [])
output_file: the name of the output file. If None, the results will be
written to stdout. (default: None)
resi_criteria: a list containing residue number criteria to include in the
output. (default: ['all'])
atom_criteria: a list containing atom name criteria to include in the
output. (default: ['all'])
occ_thresh: the minimum occupancy threshold that the hbonds must have
to be reported. (default: 0.0)
occ_graph_only: if True, only the atom string, occupancy, and graph of
each hbond will be written to output. (default: False)
sort: one of 'occ_diff', 'occ_pct', 'donor', or 'acceptor' that
indicates how to sort the output. (default: occ_diff)
compress: if True, the graphs will be compressed by compress_graph().
(default: False)
hbond_data_dir: the directory that contains the hbond data files. If
'None,' the file names will be used without modification, and the
output will be written to the current directory. (default: None)
"""
# Set up identifier strings
for i in range(len(hbond_files)):
if i >= len(identifiers):
identifiers.append(str(i + 1))
max_id_length = max(len(ident) for ident in identifiers)
for i in range(len(identifiers)):
num_spaces = max_id_length - len(identifiers[i])
identifiers[i] = num_spaces * ' ' + identifiers[i]
# Do error checking on file names
files_to_remove = []
for each_file in hbond_files:
if hbond_data_dir != None:
full_file = os.path.join(hbond_data_dir, each_file)
else:
full_file = each_file
if not os.path.exists(full_file):
print('Warning: File ' + full_file + ' does not exist.\n' + \
' Will be ignored.')
files_to_remove.append(each_file)
for each_file in files_to_remove:
i = hbond_files.index(each_file)
identifiers.remove(identifiers[i])
hbond_files.remove(each_file)
if len(hbond_files) == 0:
sys.exit('ERROR: No usable input files remain.\n')
if hbond_data_dir != None:
for i in range(len(hbond_files)):
hbond_files[i] = os.path.join(hbond_data_dir, hbond_files[i])
# Create dictionaries for each file indicating their hbonds
hb_dict_list = [] # One dictionary per hbond input file
combined_hbonds = {} # {hbond_string: None} just keeps cumulative track
for each_file in hbond_files:
hb_dict = {} # {hbond_string: hbond object}
for line in open(each_file):
hbond = HBond()
hbond.init_from_str(line)
if is_resinum_of_interest(hbond, resi_criteria) and \
is_atom_of_interest(hbond, atom_criteria):
if compress:
hbond.compress_graph()
hb_dict[hbond._atom_str()] = hbond
combined_hbonds[hbond._atom_str()] = None
hb_dict_list.append(hb_dict)
# Run through the master list of all hbonds. If a given
# dictionary doesn't have an entry for one, create one with
# zero occupancy.
for hb_dict in hb_dict_list:
for hbond_str in combined_hbonds:
found = False
for hbond_str_dict in hb_dict:
if hbond_str_dict == hbond_str:
found = True
break
if not found:
hbond = HBond()
hbond.init_from_atomstr(hbond_str)
hb_dict[hbond_str] = hbond
# Compile and sort relevant data
if sort not in 'occ_diff occ_pct donor acceptor'.split():
print('Warning: Unknown sorting method: ' + sort + '.\n' + \
' Will use occ_diff to sort.')
sort = 'occ_diff'
output = []
for hbond_str in combined_hbonds:
hb_list = [ hb_dict[hbond_str] for hb_dict in hb_dict_list ]
max_occ = max(hbond.occ_pct for hbond in hb_list)
min_occ = min(hbond.occ_pct for hbond in hb_list)
occ_diff = max_occ - min_occ
if sort == 'occ_diff' and occ_diff > occ_thresh:
output.append((occ_diff,
hb_list[0].acceptor.resi_num,
hb_list))
elif sort == 'occ_pct' and max_occ > occ_thresh:
output.append((max_occ,
hb_list[0].acceptor.resi_num,
hb_list))
elif sort == 'donor' and occ_diff > occ_thresh:
output.append((hb_list[0].donor.resi_num,
hb_list[0].donor.atom_name,
hb_list[0].acceptor.resi_num,
hb_list[0].acceptor.atom_name,
hb_list))
elif sort == 'acceptor' and occ_diff > occ_thresh:
output.append((hb_list[0].acceptor.resi_num,
hb_list[0].acceptor.atom_name,
hb_list[0].donor.resi_num,
hb_list[0].donor.atom_name,
hb_list))
output.sort()
if sort == 'occ_diff' or sort == 'occ_pct':
output.reverse()
output = [o[-1] for o in output]
# Write output
output_str = ''
for each_hbond in output:
for i in range(len(each_hbond)):
hbond = each_hbond[i]
if occ_graph_only:
output_str += identifiers[i] + ': ' + str(hbond) + '\n'
else:
output_str += identifiers[i] + ': ' + \
hbond._atom_str() + ' ' + hbond._attr_str() + '\n'
output_str += '\n'
if hbond_data_dir == None:
output_dir = '.'
else:
output_dir = hbond_data_dir
if output_file == None:
print(output_str[:-2]) # Removes the last 2 newlines
else:
try:
output_file = os.path.join(output_dir, output_file)
output_f = open(output_file, 'w')
except IOError:
print('Warning: Could not open ' + output_file + '.\n')
print(output_str)
else:
output_f.write(output_str[:-1])
output_f.close()
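# Example call (hypothetical file names): side-by-side comparison of two
# combined tables, sorted by the occupancy difference between them.
# compare_hbonds(['wt.hbond', 'mut.hbond'], identifiers=['WT', 'MUT'],
#                sort='occ_diff', occ_thresh=10.0)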
| LernerLabs/PyPAT | pypat/hbond_analysis_utils.py | Python | apache-2.0 | 34,523 | ["Amber"] | d144ffa14d1addc385dc31b3ec5f470f6f62903b1aabc9594a39e11254c75328 |
from behave import *
# Common to this feature
@when('I visit the reset password page')
def impl(context):
context.browser.visit(context.config.server_url + '/accounts/password/reset/')
| nlhkabu/connect | bdd/features/steps/reset_password.py | Python | bsd-3-clause | 191 | ["VisIt"] | 28fd0279253a0132b5632d20bf3bfc5bc2aec34d555cb3e6f07597be71139f25 |
# Copyright (C) 2012-2016
# Max Planck Institute for Polymer Research
# Copyright (C) 2008-2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
R"""
This abstract class provides the interface to (re-)initialize populations and handle external forces.
.. py:class:: espressopp.integrator.LBInit
.. py:method:: createDenVel(rho0,u0)
to set initial density and velocity of the LB-fluid.
:param real rho0: density
:param Real3D u0: velocity
The following options for LB-fluid initialization are supported:
* :class:`espressopp.integrator.LBInitPopUniform` A typical choice. It initializes uniformly distributed density and velocity: On every lattice site the density is ``rho0`` and velocity is ``u0``
* :class:`espressopp.integrator.LBInitPopWave` for uniform density at every lattice site, but harmonic velocity :math:`v_z (x)` with the period of lattice sites in *x*-direction
.. py:method:: setForce(value)
to set an external force onto LB-fluid.
:param Real3D value: value of the force
.. py:method:: addForce(force)
to add a new external force to the existing one.
:param Real3D force: value of the force
Two main external force types are implemented:
* :class:`espressopp.integrator.LBInitConstForce` to manage constant (gravity-like) forces acting on every lattice site and
* :class:`espressopp.integrator.LBInitPeriodicForce` to manage periodic (sin-like) forces
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.integrator.LatticeBoltzmann import *
from _espressopp import integrator_LBInit
class LBInitLocal(integrator_LBInit):
def createDenVel(self,rho0,u0):
if not pmi._PMIComm or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.createDenVel(self,rho0,u0)
def setForce(self,force):
if not pmi._PMIComm or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setForce(self,force)
def addForce(self,force):
if not pmi._PMIComm or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.addForce(self,force)
if pmi.isController :
class LBInit():
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
)
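# Minimal usage sketch (constructor arguments are assumptions; only the class
# and method names come from the docstring above):
# lb = espressopp.integrator.LatticeBoltzmann(system, nodeGrid)  # hypothetical setup
# initPop = espressopp.integrator.LBInitPopUniform(system, lb)
# initPop.createDenVel(1.0, espressopp.Real3D(0., 0., 0.))
# lbForce = espressopp.integrator.LBInitConstForce(system, lb)
# lbForce.setForce(espressopp.Real3D(0., 0., 0.0005))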
| fedepad/espressopp | src/integrator/LBInit.py | Python | gpl-3.0 | 3,056 | ["ESPResSo"] | 1a88cbe6278a2c5b674f1a6952aedd5226a288f0a47598746642d0d9f15a2c85 |
|
import bpy
import bpy_extras
import bmesh
import mathutils
import math
#import copy
bl_info = {
"name": "Viewport Vertex Alignment",
"author": "Hades",
"version": (0, 2),
"blender": (2, 6, 9),
"location": "View3D > Mesh Edit > Toolshelf > Vertex Alignment",
"description": "Aligns selected vertices based on a best fit algorithm, to the view port.",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Mesh"}
def debug(msg):
#print(msg)
pass
def debug_points(points,rotation):
r=mathutils.Matrix.Rotation(math.radians(rotation),4,'Z')
f=open('c:/blender/points.txt','w')
f.write(str(['x','y','z','w','rx','ry','rz','rw','dx','dy','dz','dw','weight','residual weight']))
f.write('\n')
for p in points:
pr=p['point']*r
f.write(str([p['point'].x,p['point'].y,p['point'].z,p['point'].w,pr.x,pr.y,pr.z,pr.w,p['delta'].x,p['delta'].y,p['delta'].z,p['delta'].w,p['weight'],p['residual weight']]))
f.write('\n')
f.close()
def debug_error(error):
f=open('c:/blender/error.txt','w')
f.write(str(['error sum','mean','stdev','residuals','devs']))
f.write('\n')
for i in range(len(error['residuals'])):
f.write(str([error['error sum'],error['mean'],error['stdev'],error['residuals'][i],error['devs'][i]]))
f.write('\n')
f.close()
def main(context,properties):
#debug("\nVertex Alignment operator:-----")
#import os
#os.system('cls')
obj=bpy.context.object
if (obj.mode == 'EDIT')and(bpy.context.space_data.type=="VIEW_3D") :
bm=bmesh.from_edit_mesh(obj.data)
#debug('\nSelected Vertices:')
vertices = get_vertices(bm)
#debug([v for v in vertices])
if (len(vertices) <= 2):
#debug("mesh.vertex_alignment: Not enough vertices selected")
return {'CANCELLED'}
#debug('\nAxis:')
axis = get_axis('perspective')
#debug(axis)
#debug('\nProjection:')
points = project(vertices,axis)
#debug([p['point'] for p in points])
#debug('\nFit:')
points = fit1(properties,points) # points is updated in place; the right-hand assignment is redundant but kept for clarity
#debug([p['delta'] for p in points])
#debug('\nUnproject:')
vertices_updated = unproject(points,axis,properties)
#debug([p["v'"] for p in vertices_updated])
#debug("\nUpdate Vertices:")
update_vertices(obj.data,points)
#debug ("\nend function------")
return {'FINISHED'}
#def filter_outliers(points,rotate,properties,errori):
# """This function will lower the weight of points with residuals that are outside of one standard deviation"""
# print("FILTER_OUTLIERS FUNCTION:")
# print(errori['stdev'])
# if (errori['stdev']>1.0):
# ind=0
# for d in errori['devs']:
# if (math.sqrt(d) >= errori['stdev']):
# points[ind]['weight']=0
# points[ind]['residual weight']=0
# errori['outliers'] += 1
#
# ind+=1
# return points
def filter_anchor(points,rotate,properties,error):
"""This function will add extreme weighting to the boundary points"""
#debug('Anchor: shifting weights')
#this funciton only works because the fit_functions will sort points before this function is called.
max_weight=10000
points[0]['weight'] = max_weight
points[-1]['weight'] = max_weight
points[0]['residual weight'] = 1
points[-1]['residual weight'] = 1
return points
def filter_reset_weights(points):
for p in points:
p['weight']=1
p['residual weight']=1
return points
#def copy_deltas(points):
# a={}
# for p in points:
# a[p['id']]=p['delta']
# return a
#def paste_deltas(points,best_deltas):
# for d in points['delta']:
def fit1(properties,points):
"""This function applies the fitting function several times, finding the axis rotation that causes the smallest error and returns the points.
This expects a 1D fit where x is the domain, y is the range (and therefore y is being affected in fit)."""
#debug("\nFunction Trial:")
#note: points is being treated as though it's 'by reference'-- inner values such as deltas are being changed on the main object, so be careful on order of operations.
fit_function=properties['function']
iterations=properties['iterations']
max_error=float('inf')
error=[] #list of dictionaries
smallest_error=max_error
min_error=0
#best_points=None
min_theta=0
theta=0
theta_step_initial=45
theta_step=theta_step_initial
theta_forward=True
for i in range(iterations):#angles
anchor=properties['anchor']
#outlier_filter=properties['outlier_filter']
points=filter_reset_weights(points)
try:
error.append({'failed':True,'error sum':max_error,'stdev':0,'mean':max_error,'residuals':[max_error],'devs':[0]}) #'outliers':error[i]['outliers']
while True: #filters
error[i]={'failed':True,'error sum':max_error,'stdev':0,'mean':max_error,'residuals':[max_error],'devs':[0]}
points=fit_function(points,theta,properties)
error[i]={'failed':False,'error sum':0,'stdev':0,'mean':0,'residuals':[],'devs':[]} #reset it-- in case an exception is thrown in the fit_function
SrN=0
for p in points:
error[i]['residuals'].append(
math.pow(
math.sqrt(
math.pow(p['delta'].x,2)+math.pow(p['delta'].y,2)+math.pow(p['delta'].z,2)+math.pow(p['delta'].w,2)
)
,2)*p['residual weight']
)
error[i]['error sum'] += error[i]['residuals'][-1]
SrN += p['residual weight']
N=SrN #len(error[i]['residuals'])
#print(N)
error[i]['mean']=error[i]['error sum'] / N
for e in error[i]['residuals']:
error[i]['devs'].append(math.pow(e - error[i]['mean'],2))
error[i]['stdev'] += error[i]['devs'][-1]
error[i]['stdev'] = math.sqrt(error[i]['stdev']/N)
if (not anchor):#or(outlier_filter)):
break
#if (outlier_filter):
# if ((error[i]['stdev'] <= properties['outlier_filter_target'])or(error[i]['outliers'] >= len(points)-3)): #you need at least 3 points to attempt to describe a curve.
# outlier_filter=False
# else:
# prev_outliers=error[i]['outliers']
# points=filter_outliers(points,theta,properties,error[i])
# print(["IF",prev_outliers,error[i]['outliers'],prev_outliers == error[i]['outliers']])
#
# if (error[i]['outliers'] == prev_outliers): #no more matches were found.
# print("NO MORE OUTLIERS")
# outlier_filter=False
if (anchor):
points=filter_anchor(points,theta,properties,error)
anchor=False
#print([i,theta,outlier_filter,anchor,error[i]['stdev'],error[i]['outliers']])
if (error[i]['error sum'] < smallest_error):
smallest_error=error[i]['error sum']
min_error=i
min_theta=theta
#best_points=copy.copy(points)
except ValueError as e:
print(e)
except ZeroDivisionError as e:
print(e)
#angle convergence:
if (i>360/theta_step_initial): #let it run around the cirlce a full time first, then search for the smallest error
if (theta_forward):
if (error[i]['error sum'] == smallest_error):
theta+=theta_step
elif (error[i]['error sum'] > smallest_error):
theta_step/=2.0
theta-=theta_step
theta_forward=False
else:
if (error[i]['error sum'] == smallest_error):
theta-=theta_step
elif (error[i]['error sum'] > smallest_error):
theta_step/=2.0
theta+=theta_step
theta_forward=True
elif (i == 360/theta_step_initial):
theta=min_theta
theta_step/=2.0
else:
theta+=theta_step
if (theta_step <= 0.000000001): #best angle found (or very close !)
break
#debug_error(error[min_error])
#debug_points(points,min_theta)
# One more pass: the full two-step procedure (1: dry run, 2: filtered).
anchor=properties['anchor']
points=filter_reset_weights(points)
points=fit_function(points,min_theta,properties)
#if (outlier_filter):
# points=filter_outliers(points,min_theta,properties,error[min_error])
# outlier_filter=False
if (anchor):
points=filter_anchor(points,min_theta,properties,error)
anchor=False
points=fit_function(points,min_theta,properties)
#points=best_points
return points
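# Search-strategy note: theta first sweeps the full circle in 45-degree steps,
# then bisects around the best angle found, halving theta_step and reversing
# direction whenever the error grows, until the step drops below 1e-9; a
# coarse global scan followed by a local refinement.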
def error_residual1(points, r , rr, properties, line_func, line_parameters):
"""This function is used in the fitting functions to determine the deltas """
#print("Residual Errors:")
for p in points:
pr=p['point']*r
x = pr.x
y = pr.y
yy = line_func(x,line_parameters)
p['delta'] = mathutils.Vector((0,(y - yy),0,0))*rr
return points
def sort_index1(points,r):
"""This function sorts points based on their domain (assumed as x axis when rotated) """
#print("Sorting Indices:")
points = sorted(points, key=lambda xx: (xx['point']*r).x)
return points
def fit_linear1(points,rotate,properties=None):
"""This function attempts to fit a given set of points to a linear line: y = a1*x + a0"""
r=mathutils.Matrix.Rotation(math.radians(rotate),4,'Z')
rr=mathutils.Matrix.Rotation(math.radians(-rotate),4,'Z')
Sxy = 0
Sx = 0
Sy = 0
Sx2 = 0
Sw = 0
for p in points:
pr=p['point']*r
x = pr.x
y = pr.y
Sxy += x*y * p['weight']
Sx += x * p['weight']
Sy += y * p['weight']
Sx2 += math.pow(x,2) * p['weight']
Sw += p['weight']
N = Sw
a1 = ( N*Sxy - Sx*Sy ) / ( N*Sx2 - math.pow(Sx,2))
a0 = 1/N * Sy - a1 * 1/N * Sx
def line_func(x,a):
return a[0] + a[1]*x
points=sort_index1(points,r)
return error_residual1(points,r,rr,properties,line_func,[a0,a1])
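# Worked check (hypothetical numbers, unit weights): points (0,1), (1,3), (2,5)
# give Sx=3, Sy=9, Sxy=13, Sx2=5, N=3, so
#     a1 = (3*13 - 3*9) / (3*5 - 3**2) = 12/6 = 2
#     a0 = (1/3)*9 - 2*(1/3)*3 = 1
# recovering the exact line y = 2x + 1, as expected for collinear input.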
def fit_quadratic1(points,rotate,properties=None):
"""This function attempts to fit a given set of points to a quadratic polynomial line: y = a2*x^2 + a1*x + a0"""
r=mathutils.Matrix.Rotation(math.radians(rotate),4,'Z')
rr=mathutils.Matrix.Rotation(math.radians(-rotate),4,'Z')
Sxy = 0
Sx = 0
Sy = 0
Sx2 = 0
Sx2y = 0
Sx3 = 0
Sx4 = 0
Sw = 0
for p in points:
pr=p['point']*r
x = pr.x
y = pr.y
Sxy = Sxy + x*y * p['weight']
Sx = Sx + x * p['weight']
Sy = Sy + y * p['weight']
Sx2 = Sx2 + math.pow(x,2) * p['weight']
Sx2y = Sx2y+ math.pow(x,2)*y * p['weight']
Sx3 = Sx3 + math.pow(x,3) * p['weight']
Sx4 = Sx4 + math.pow(x,4) * p['weight']
Sw += p['weight']
N = Sw
A=[[N, Sx, Sx2,Sy], [Sx, Sx2, Sx3,Sxy], [Sx2, Sx3, Sx4,Sx2y]]
xM=like_a_gauss(A)
a0=xM[0][3]
a1=xM[1][3]
a2=xM[2][3]
def line_func(x,a):
return a[0] + a[1]*x + a[2]*math.pow(x,2)
points=sort_index1(points,r)
return error_residual1(points,r,rr,properties,line_func,[a0,a1,a2])
def fit_cubic1(points,rotate,properties=None):
"""This function attempts to fit a given set of points to a cubic polynomial line: y = a3*x^3 + a2*x^2 + a1*x + a0"""
r=mathutils.Matrix.Rotation(math.radians(rotate),4,'Z')
rr=mathutils.Matrix.Rotation(math.radians(-rotate),4,'Z')
Sxy = 0
Sx = 0
Sy = 0
Sx2 = 0
Sx2y = 0
Sx3y = 0
Sx3 = 0
Sx4 = 0
Sx5 = 0
Sx6 = 0
Sw = 0
for p in points:
pr=p['point']*r
x = pr.x
y = pr.y
Sxy = Sxy + x*y * p['weight']
Sx = Sx + x * p['weight']
Sy = Sy + y * p['weight']
Sx2 = Sx2 + math.pow(x,2) * p['weight']
Sx2y = Sx2y+ math.pow(x,2)*y * p['weight']
Sx3y = Sx3y+ math.pow(x,3)*y * p['weight']
Sx3 = Sx3 + math.pow(x,3) * p['weight']
Sx4 = Sx4 + math.pow(x,4) * p['weight']
Sx5 = Sx5 + math.pow(x,5) * p['weight']
Sx6 = Sx6 + math.pow(x,6) * p['weight']
Sw += p['weight']
N = Sw
A=[[N, Sx, Sx2,Sx3,Sy], [Sx, Sx2, Sx3,Sx4,Sxy], [Sx2, Sx3, Sx4, Sx5,Sx2y], [Sx3, Sx4, Sx5, Sx6,Sx3y]]
xM=like_a_gauss(A)
a0=xM[0][4]
a1=xM[1][4]
a2=xM[2][4]
a3=xM[3][4]
def line_func(x,a):
return a[0] + a[1]*x + a[2]*math.pow(x,2) + a[3]*math.pow(x,3)
points=sort_index1(points,r)
return error_residual1(points,r,rr,properties,line_func,[a0,a1,a2,a3])
def fit_cosine1(points,rotate,properties):
"""This function attempts to fit a given set of points to a cosine curve: y = a0 + a1*cos(w*x) + a2*cos(w*x) """
r=mathutils.Matrix.Rotation(math.radians(rotate),4,'Z')
rr=mathutils.Matrix.Rotation(math.radians(-rotate),4,'Z')
omega=properties['cosine_omega']
Sycos = 0
Sysin = 0
Scos = 0
Scos2 = 0
Ssin = 0
Ssin2 = 0
Sy = 0
Scossin = 0
Sw = 0
for p in points:
pr=p['point']*r
x = pr.x
y = pr.y
Sy = Sy + y* p['weight']
Sycos=Sycos + y * math.cos(omega * x)* p['weight']
Sysin=Sysin + y * math.sin(omega * x)* p['weight']
Scos = Scos + math.cos(omega * x)* p['weight']
Ssin = Ssin + math.sin(omega * x)* p['weight']
Scos2= Scos2+ math.pow(math.cos(omega * x),2)* p['weight']
Ssin2= Ssin2+ math.pow(math.sin(omega * x),2)* p['weight']
Scossin= Scossin+ math.cos(omega * x) * math.sin(omega * x)* p['weight']
Sw += p['weight']
N = Sw
A=[[N, Scos, Ssin, Sy], [Scos, Scos2, Scossin, Sycos], [Ssin, Scossin, Ssin2, Sysin]]
xM=like_a_gauss(A)
a0=xM[0][3]
a1=xM[1][3]
a2=xM[2][3]
def line_func(x,a):
return a[0] + a[1]*math.cos(a[3] * x) + a[2] * math.sin(a[3] * x)
points=sort_index1(points,r)
return error_residual1(points,r,rr,properties,line_func,[a0,a1,a2,omega])
def get_vertices(mesh):
"""Returns the active list of selected vertices."""
verts = []
for v in mesh.verts:
if v.select:
verts.append(v)
return verts
def get_axis(type):
"""Gets the axis we will be performing the rotation on. Returns a projection matrix"""
if (type == 'perspective'):
region = bpy.context.region
rv3d = bpy.context.region_data
else:
#debug('mesh.vertex_align: get_axis: Unexpected input')
return None
return {"region":region,"rv3d":rv3d}
def project(vertices,axis):
"""Project the vertices onto a plane of the given axis."""
points = []
for v in vertices:
vec = mathutils.Vector(v.co)
p = bpy_extras.view3d_utils.location_3d_to_region_2d(axis['region'],axis['rv3d'],vec).to_4d()
depth = vec
points.append({"id":v,"point":p,"delta":None,"v'":None,"depth":depth,"weight":1.0,"residual weight":1.0,'index':None}) #id=original vert reference, point=project point on plane, d=delta changes by fit function, v' = Vector of final 3d vert position, depth=depth vector needed for unprojecting, weight=how much a point impacts the fit, residual weight=how much a points varience should be counted in the error.
return points
def unproject(points,axis,properties):
"""Unproject points on a plane to vertices in 3d space."""
for p in points:
new_p = p['point']-p['delta']*properties['influence']
old_v = p['id'].co
new_v = bpy_extras.view3d_utils.region_2d_to_location_3d(axis['region'],axis['rv3d'],new_p.to_2d(),p['depth'])
p["v'"]=new_v
return points
def update_vertices(mesh,points):
"""Update the active set of selected vertices with their fitted positions."""
for p in points:
p['id'].co = p["v'"].to_3d().to_tuple()
bmesh.update_edit_mesh(mesh)
def like_a_gauss(mat):
"""
Implementation of the Gaussian Elimination Algorithm for finding the row-reduced echelon form of a given matrix.
No pivoting is done.
Requires Python 3 due to the different behaviour of the division operation in earlier versions of Python.
Released under the Public Domain (if you want it - you probably don't)
https://gist.github.com/zhuowei/7149445
Changes mat into Reduced Row-Echelon Form.
"""
# Let's do forward step first.
# at the end of this for loop, the matrix is in Row-Echelon format.
for i in range(min(len(mat), len(mat[0]))):
# every iteration, ignore one more row and column
for r in range(i, len(mat)):
# find the first row with a nonzero entry in first column
zero_row = mat[r][i] == 0
if zero_row:
continue
# swap current row with first row
mat[i], mat[r] = mat[r], mat[i]
# add multiples of the new first row to lower rows so lower
# entries of the first column are zero
first_row_first_col = mat[i][i]
for rr in range(i + 1, len(mat)):
this_row_first = mat[rr][i]
scalarMultiple = -1 * this_row_first / first_row_first_col
for cc in range(i, len(mat[0])):
mat[rr][cc] += mat[i][cc] * scalarMultiple
break
# At the end of the forward step
# Now reduce
for i in range(min(len(mat), len(mat[0])) - 1, -1, -1):
# divide last non-zero row by first non-zero entry
first_elem_col = -1
first_elem = -1
for c in range(len(mat[0])):
if mat[i][c] == 0:
continue
if first_elem_col == -1:
first_elem_col = c
first_elem = mat[i][c]
mat[i][c] /= first_elem
# add multiples of this row so all numbers above the leading 1 are zero
for r in range(i):
this_row_above = mat[r][first_elem_col]
scalarMultiple = -1 * this_row_above
for cc in range(len(mat[0])):
mat[r][cc] += mat[i][cc] * scalarMultiple
# disregard this row and continue
return mat
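# Quick check (hypothetical system): solve x + y = 3, x - y = 1 via the
# augmented matrix; after reduction the solution sits in the last column.
# m = [[1.0, 1.0, 3.0], [1.0, -1.0, 1.0]]
# like_a_gauss(m)  # m becomes [[1.0, 0.0, 2.0], [0.0, 1.0, 1.0]] -> x=2, y=1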
class OPS_MESH_hd_viewport_vertexalign(bpy.types.Operator):
"""Align Vertices based on a least squares algorithm, based on the active view port."""
bl_idname = "mesh.hd_viewport_vertex_align"
bl_label = "3D Viewport Vertex Alignment"
bl_options = {'REGISTER', 'UNDO'}
function = bpy.props.EnumProperty(
items=[('LINEAR1','1D Linear','Linear Least Squares Method'),
('QUADRATIC1','1D Parabolic','Quadratic Polynomial Least Squares Method'),
('CUBIC1','1D Cubic', 'Cubic Polynomial Least Squares Method'),
('COSINE1','1D Cosine', 'Cosine Least Squares Method')],
name="Fit type",
description="Select the method to align the vertices by.",
default='LINEAR1')
cosine_omega = bpy.props.FloatProperty(
name="Omega",
description="Angular frequency",
default=0.01,
min=0.0001,
step=0.001,
soft_min=0.001)
influence = bpy.props.FloatProperty(
name="Influence",
description="How much the best fit solution is applied.",
default=1,
soft_max=1,
soft_min=0,
step=0.01)
#outlier_filter = bpy.props.BoolProperty(
# name="Outlier Filter",
# description="Should vertices that are outside of the standard deviation be filtered out of the fitting function?",
# default=True)
#outlier_filter_target = bpy.props.FloatProperty(
# name="Standard Deviation target for error deviation.",
# description="How far is too far from a fitted line?",
# default=10,
# min=0.1,
# step=0.5)
iterations = bpy.props.IntProperty(
name="Max Iterations",
description="Max number of iterations to try and solve.",
default=180,
soft_max=180,
min=1)
anchor = bpy.props.BoolProperty(
name="Anchor Boundaries",
description="Should the start and end vertices be anchored?",
default=True)
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
if (self.function == 'LINEAR1'):
fit_function=fit_linear1
elif (self.function == 'QUADRATIC1'):
fit_function=fit_quadratic1
elif (self.function == 'CUBIC1'):
fit_function=fit_cubic1
elif (self.function == 'COSINE1'):
fit_function=fit_cosine1
else:
#debug('unexpected input for "function" in mesh.vertex_align')
fit_function=fit_linear1
#,"max_error":math.pow(self.max_error,0.5),,"outlier_filter":self.outlier_filter,"outlier_filter_target":self.outlier_filter_target
properties={"function":fit_function,"cosine_omega":self.cosine_omega,"influence":self.influence,"iterations":self.iterations,"anchor":self.anchor}
return main(context,properties)
class ViewportVertexAlignMenu(bpy.types.Menu):
bl_label = "Vertex Alignment"
bl_idname = "MESH_MT_edit_mesh_hd_viewport_vertex_align"
def draw(self, context):
layout = self.layout
layout.operator("mesh.hd_viewport_vertex_align",text="Linear",icon='CURVE_PATH').function='LINEAR1'
layout.operator("mesh.hd_viewport_vertex_align",text="Parabolic",icon='CURVE_BEZCURVE').function='QUADRATIC1'
layout.operator("mesh.hd_viewport_vertex_align",text="Cubic",icon='CURVE_BEZCURVE').function='CUBIC1'
layout.operator("mesh.hd_viewport_vertex_align",text="Cosine",icon='CURVE_BEZCURVE').function='COSINE1'
def draw_item(self, context):
layout = self.layout
layout.menu(ViewportVertexAlignMenu.bl_idname)
def menu_specials(self, context):
self.layout.menu("MESH_MT_edit_mesh_hd_viewport_vertex_align")
self.layout.separator()
class ViewportVertexAlignPanel(bpy.types.Panel):
"""Creates a Panel in the scene context of the properties editor"""
bl_label = "3D Viewport Vertex Alignment"
bl_idname = "VIEW3D_PT_hd_viewport_vertex_alignment"
bl_space_type = 'VIEW_3D'
bl_region_type = 'TOOLS'
bl_context = "mesh_edit"
def draw(self, context):
layout = self.layout
col = layout.column(align=True)
col.operator("mesh.hd_viewport_vertex_align")
def register():
bpy.utils.register_class(OPS_MESH_hd_viewport_vertexalign)
bpy.utils.register_class(ViewportVertexAlignMenu)
bpy.utils.register_class(ViewportVertexAlignPanel)
bpy.types.VIEW3D_MT_edit_mesh_specials.prepend(menu_specials)
bpy.types.INFO_HT_header.append(draw_item)
def unregister():
bpy.utils.unregister_class(OPS_MESH_hd_viewport_vertexalign)
bpy.utils.unregister_class(ViewportVertexAlignMenu)
bpy.utils.unregister_class(ViewportVertexAlignPanel)
bpy.types.VIEW3D_MT_edit_mesh_specials.remove(menu_specials)
bpy.types.INFO_HT_header.remove(draw_item)
if __name__ == "__main__":
register()
#unregister()
#register()
# test call
#bpy.ops.wm.call_menu(name=ViewportVertexAlignMenu.bl_idname)
#unregister()
| hdunderscore/mesh_viewport_vertex_align | mesh_viewport_vertex_alignment.py | Python | mit | 25,359 | ["Gaussian"] | a509b2875da983b82728e460c4fda4bf9a74f3dbdbc8e1fc17cb820bfc17b8e8 |
|
# Orca
#
# Copyright 2010-2011 The Orca Team
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
""" A list of common keybindings and unbound keys
pulled out from default.py: getKeyBindings()
with the goal of being more readable and less monolithic.
"""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010-2011 The Orca Team"
__license__ = "LGPL"
from . import keybindings
# Storing values
defaultModifierMask = keybindings.defaultModifierMask
ORCA_MODIFIER_MASK = keybindings.ORCA_MODIFIER_MASK
NO_MODIFIER_MASK = keybindings.NO_MODIFIER_MASK
ORCA_SHIFT_MODIFIER_MASK = keybindings.ORCA_SHIFT_MODIFIER_MASK
ORCA_CTRL_MODIFIER_MASK = keybindings.ORCA_CTRL_MODIFIER_MASK
ORCA_ALT_MODIFIER_MASK = keybindings.ORCA_ALT_MODIFIER_MASK
ORCA_CTRL_ALT_MODIFIER_MASK = keybindings.ORCA_CTRL_ALT_MODIFIER_MASK
SHIFT_ALT_MODIFIER_MASK = keybindings.SHIFT_ALT_MODIFIER_MASK
keymap = (
("F11", defaultModifierMask, ORCA_MODIFIER_MASK,
"toggleTableCellReadModeHandler"),
("f", defaultModifierMask, ORCA_MODIFIER_MASK,
"readCharAttributesHandler"),
("h", defaultModifierMask, ORCA_MODIFIER_MASK,
"enterLearnModeHandler", 1),
("space", defaultModifierMask, ORCA_MODIFIER_MASK,
"preferencesSettingsHandler"),
("space", defaultModifierMask, ORCA_CTRL_MODIFIER_MASK,
"appPreferencesSettingsHandler"),
("s", defaultModifierMask, ORCA_MODIFIER_MASK,
"toggleSilenceSpeechHandler"),
("v", defaultModifierMask, ORCA_MODIFIER_MASK,
"toggleSpeechVerbosityHandler"),
("t", defaultModifierMask, ORCA_MODIFIER_MASK,
"presentTimeHandler", 1),
("t", defaultModifierMask, ORCA_MODIFIER_MASK,
"presentDateHandler", 2),
#####################################################################
# #
# Bookmark key bindings #
# #
#####################################################################
# key binding to save bookmark information to disk
("b", defaultModifierMask, ORCA_ALT_MODIFIER_MASK,
"saveBookmarks"),
# key binding to move to the previous bookmark
("b", defaultModifierMask, ORCA_SHIFT_MODIFIER_MASK,
"goToPrevBookmark"),
# key binding to move to the next bookmark
("b", defaultModifierMask, ORCA_MODIFIER_MASK,
"goToNextBookmark"),
# key bindings for '1' through '6' for relevant commands
# 'Add bookmark' key bindings
("1", defaultModifierMask, ORCA_ALT_MODIFIER_MASK,
"addBookmark"),
("2", defaultModifierMask, ORCA_ALT_MODIFIER_MASK,
"addBookmark"),
("3", defaultModifierMask, ORCA_ALT_MODIFIER_MASK,
"addBookmark"),
("4", defaultModifierMask, ORCA_ALT_MODIFIER_MASK,
"addBookmark"),
("5", defaultModifierMask, ORCA_ALT_MODIFIER_MASK,
"addBookmark"),
("6", defaultModifierMask, ORCA_ALT_MODIFIER_MASK,
"addBookmark"),
# 'Go to bookmark' key bindings
("1", defaultModifierMask, ORCA_MODIFIER_MASK,
"goToBookmark"),
("2", defaultModifierMask, ORCA_MODIFIER_MASK,
"goToBookmark"),
("3", defaultModifierMask, ORCA_MODIFIER_MASK,
"goToBookmark"),
("4", defaultModifierMask, ORCA_MODIFIER_MASK,
"goToBookmark"),
("5", defaultModifierMask, ORCA_MODIFIER_MASK,
"goToBookmark"),
("6", defaultModifierMask, ORCA_MODIFIER_MASK,
"goToBookmark"),
("BackSpace", defaultModifierMask, ORCA_MODIFIER_MASK,
"bypassNextCommandHandler"),
#####################################################################
# #
# Unbound handlers #
# #
#####################################################################
("", defaultModifierMask, NO_MODIFIER_MASK,
"cycleSettingsProfileHandler"),
("", defaultModifierMask, NO_MODIFIER_MASK,
"cycleCapitalizationStyleHandler"),
("", defaultModifierMask, NO_MODIFIER_MASK,
"cycleDebugLevelHandler"),
("", defaultModifierMask, NO_MODIFIER_MASK,
"decreaseSpeechRateHandler"),
("", defaultModifierMask, NO_MODIFIER_MASK,
"increaseSpeechRateHandler"),
("", defaultModifierMask, NO_MODIFIER_MASK,
"decreaseSpeechPitchHandler"),
("", defaultModifierMask, NO_MODIFIER_MASK,
"increaseSpeechPitchHandler"),
("", defaultModifierMask, NO_MODIFIER_MASK,
"increaseSpeechVolumeHandler"),
("", defaultModifierMask, NO_MODIFIER_MASK,
"decreaseSpeechVolumeHandler"),
("", defaultModifierMask, NO_MODIFIER_MASK,
"panBrailleLeftHandler"),
("", defaultModifierMask, NO_MODIFIER_MASK,
"panBrailleRightHandler"),
("", defaultModifierMask, NO_MODIFIER_MASK,
"toggleMouseReviewHandler"),
("", defaultModifierMask, NO_MODIFIER_MASK,
"toggleSpeakingIndentationJustificationHandler"),
("", defaultModifierMask, NO_MODIFIER_MASK,
"cycleSpeakingPunctuationLevelHandler"),
("", defaultModifierMask, NO_MODIFIER_MASK,
"cycleKeyEchoHandler"),
("", defaultModifierMask, NO_MODIFIER_MASK,
"repeatLastNotificationMessageHandler"),
("", defaultModifierMask, NO_MODIFIER_MASK,
"repeatPreviousNotificationMessageHandler"),
("", defaultModifierMask, NO_MODIFIER_MASK,
"enableNotificationMessageListModeHandler"),
("", defaultModifierMask, NO_MODIFIER_MASK,
"flatReviewCopyHandler"),
("", defaultModifierMask, NO_MODIFIER_MASK,
"flatReviewAppendHandler"),
("", defaultModifierMask, NO_MODIFIER_MASK,
"shutdownHandler"),
)
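# Entry format (inferred from the tuples above, not from separate Orca
# documentation): (key name, modifier mask to match against, modifiers that
# must be held, handler name[, click count]); e.g. Orca+t pressed once
# presents the time and pressed twice presents the date.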
| pvagner/orca | src/orca/common_keyboardmap.py | Python | lgpl-2.1 | 6,459 | ["ORCA"] | 7851c66cecb6f760c23f2e2828e836c4a7b21a10e751411bda2222147d3383b2 |
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from Plugins.Extensions.OpenWebif.local import tstrings
from json import dumps
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885498.353708
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:38 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/main.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class main(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(main, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def menu(self, title, name, content, **KWS):
## CHEETAH: generated from #def menu($title, $name, $content) at line 37, col 4.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''\t\t\t<div id="leftmenu_main">\r
\t\t\t\t<div id="leftmenu_top">\r
\t\t\t\t\t''')
_v = VFFSL(SL,"title",True) # u'$title' on line 40, col 6
if _v is not None: write(_filter(_v, rawExpr=u'$title')) # from line 40, col 6.
write(u'''\r
''')
if VFFSL(SL,"name",True) in VFFSL(SL,"collapsed",True): # generated from line 41, col 6
write(u'''\t\t\t\t\t<div id="leftmenu_expander_''')
_v = VFFSL(SL,"name",True) # u'$name' on line 42, col 33
if _v is not None: write(_filter(_v, rawExpr=u'$name')) # from line 42, col 33.
write(u'''" class="leftmenu_icon leftmenu_icon_collapse" onclick="toggleMenu(\'''')
_v = VFFSL(SL,"name",True) # u'$name' on line 42, col 106
if _v is not None: write(_filter(_v, rawExpr=u'$name')) # from line 42, col 106.
write(u'''\');"></div>\r
''')
else: # generated from line 43, col 6
write(u'''\t\t\t\t\t<div id="leftmenu_expander_''')
_v = VFFSL(SL,"name",True) # u'$name' on line 44, col 33
if _v is not None: write(_filter(_v, rawExpr=u'$name')) # from line 44, col 33.
write(u'''" class="leftmenu_icon" onclick="toggleMenu(\'''')
_v = VFFSL(SL,"name",True) # u'$name' on line 44, col 83
if _v is not None: write(_filter(_v, rawExpr=u'$name')) # from line 44, col 83.
write(u'''\');"></div>\r
''')
write(u'''\t\t\t\t</div>\r
''')
if VFFSL(SL,"name",True) in VFFSL(SL,"collapsed",True): # generated from line 47, col 5
write(u'''\t\t\t\t<div id="leftmenu_container_''')
_v = VFFSL(SL,"name",True) # u'$name' on line 48, col 33
if _v is not None: write(_filter(_v, rawExpr=u'$name')) # from line 48, col 33.
write(u'''" style="display: none;">\r
''')
else: # generated from line 49, col 5
write(u'''\t\t\t\t<div id="leftmenu_container_''')
_v = VFFSL(SL,"name",True) # u'$name' on line 50, col 33
if _v is not None: write(_filter(_v, rawExpr=u'$name')) # from line 50, col 33.
write(u'''">\r
''')
write(u'''\t\t\t\t''')
_v = VFFSL(SL,"content",True) # u'$content' on line 52, col 5
if _v is not None: write(_filter(_v, rawExpr=u'$content')) # from line 52, col 5.
write(u'''\r
\t\t\t\t</div>\r
\t\t\t</div>\r
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def mainMenu(self, **KWS):
## CHEETAH: generated from #def mainMenu at line 57, col 4.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''\t\t\t<ul>\r
\t\t\t\t<li><a href=\'#\' onclick="load_maincontent(\'ajax/tv\'); return false;">''')
_v = VFFSL(SL,"tstrings",True)['television'] # u"$tstrings['television']" on line 59, col 74
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['television']")) # from line 59, col 74.
write(u'''</a></li>\r
\t\t\t\t<li><a href=\'#\' onclick="load_maincontent(\'ajax/radio\'); return false;">''')
_v = VFFSL(SL,"tstrings",True)['radio'] # u"$tstrings['radio']" on line 60, col 77
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['radio']")) # from line 60, col 77.
write(u"""</a></li>\r
\t\t\t\t<li><a href='ajax/multiepg2' target=_blank>""")
_v = VFFSL(SL,"tstrings",True)['tv_multi_epg'] # u"$tstrings['tv_multi_epg']" on line 61, col 48
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['tv_multi_epg']")) # from line 61, col 48.
write(u'''</a></li>\r
\t\t\t</ul>\r
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def volumeMenu(self, **KWS):
## CHEETAH: generated from #def volumeMenu at line 65, col 4.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''\t\t\t<div class="volslider">\r
\t\t\t\t\t<p style="text-align:center; padding-bottom:8px;"> \r
\t\t\t\t\t\t<label for="amount">''')
_v = VFFSL(SL,"tstrings",True)['volume'] # u"$tstrings['volume']" on line 68, col 27
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['volume']")) # from line 68, col 27.
write(u''':</label>\r
\t\t\t\t\t\t<input type="text" id="amount" style="border:0; color:#f6931f; font-weight:bold; width:40px;" />\r
\t\t\t\t\t</p>\r
\t\t\t\t<div id="slider" style="width:130px;"></div>\r
\t\t\t</div>\r
\t\t\t<div style="width:100%; text-align:center; padding-top:5px; padding-bottom:10px;"><img id="volimage" src="images/volume.png" title="" border="0"></div>\r
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def controlMenu(self, **KWS):
## CHEETAH: generated from #def controlMenu at line 76, col 4.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''\t\t\t<ul>\r
\t\t\t\t<li><a href=\'#\' onclick="load_maincontent(\'ajax/powerstate\'); return false;">''')
_v = VFFSL(SL,"tstrings",True)['powercontrol'] # u"$tstrings['powercontrol']" on line 78, col 82
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['powercontrol']")) # from line 78, col 82.
write(u'''</a></li>\r
\t\t\t\t<li><a href=\'#\' onclick="load_maincontent(\'ajax/screenshot\'); return false;">''')
_v = VFFSL(SL,"tstrings",True)['grabscreenshot'] # u"$tstrings['grabscreenshot']" on line 79, col 82
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['grabscreenshot']")) # from line 79, col 82.
write(u'''</a></li>\r
\t\t\t\t<li><a href=\'#\' onclick="load_maincontent(\'ajax/message\'); return false;">''')
_v = VFFSL(SL,"tstrings",True)['sendamessage'] # u"$tstrings['sendamessage']" on line 80, col 79
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['sendamessage']")) # from line 80, col 79.
write(u'''</a></li>\r
\t\t\t\t<li><a href=\'#\' onclick="load_maincontent(\'ajax/timers\'); return false;">''')
_v = VFFSL(SL,"tstrings",True)['timers'] # u"$tstrings['timers']" on line 81, col 78
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['timers']")) # from line 81, col 78.
write(u'''</a></li>\r
\t\t\t</ul>\r
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def infoMenu(self, **KWS):
## CHEETAH: generated from #def infoMenu at line 85, col 4.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''\t\t\t<ul>\r
\t\t\t\t<li><a href="#" onclick="load_maincontent(\'ajax/boxinfo\'); return false">''')
_v = VFFSL(SL,"tstrings",True)['box_info'] # u"$tstrings['box_info']" on line 87, col 78
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['box_info']")) # from line 87, col 78.
write(u'''</a></li>\r
\t\t\t\t<li><a href="#" onclick="load_maincontent(\'ajax/about\'); return false">''')
_v = VFFSL(SL,"tstrings",True)['about'] # u"$tstrings['about']" on line 88, col 76
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['about']")) # from line 88, col 76.
write(u'''</a></li>\r
\t\t\t</ul>\r
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def streamMenu(self, **KWS):
## CHEETAH: generated from #def streamMenu at line 92, col 4.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''\t\t\t<ul>\r
\t\t\t\t<li><a href=\'#\' onclick="load_maincontent_spin(\'ajax/movies\'); return false;">''')
_v = VFFSL(SL,"tstrings",True)['movies'] # u"$tstrings['movies']" on line 94, col 83
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['movies']")) # from line 94, col 83.
write(u'''</a></li>\r
<!--\t\t\t\t<li><a href=\'#\' onclick="load_maincontent(\'ajax/workinprogress\'); return false;">Web Tv</a></li> -->\r
''')
if VFFSL(SL,"zapstream",True): # generated from line 96, col 5
write(u'''\t\t\t\t<li><input type="checkbox" name="zapstream" checked="checked" />''')
_v = VFFSL(SL,"tstrings",True)['zapbeforestream'] # u"$tstrings['zapbeforestream']" on line 97, col 69
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['zapbeforestream']")) # from line 97, col 69.
write(u'''</li>\r
''')
else: # generated from line 98, col 5
write(u'''\t\t\t\t<li><input type="checkbox" name="zapstream" />''')
_v = VFFSL(SL,"tstrings",True)['zapbeforestream'] # u"$tstrings['zapbeforestream']" on line 99, col 51
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['zapbeforestream']")) # from line 99, col 51.
write(u'''</li>\r
''')
write(u'''\t\t\t</ul>\r
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def searchMenu(self, **KWS):
## CHEETAH: generated from #def searchMenu at line 104, col 4.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''\t\t\t<form action="" onSubmit="open_epg_search_pop(); return false;">\r
\t\t\t\t<div style="width:100%; text-align:center; padding-top:5px;"><input type="text" id="epgSearch" size="14" /></div>\r
\t\t\t\t<div style="width:100%; text-align:center;padding-top:5px; padding-bottom:7px;" class="epgsearch"><button>''')
_v = VFFSL(SL,"tstrings",True)['search'] # u"$tstrings['search']" on line 107, col 111
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['search']")) # from line 107, col 111.
write(u'''</button></div>\r
\t\t\t</form>\r
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def remoteMenu(self, **KWS):
## CHEETAH: generated from #def remoteMenu at line 111, col 4.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''\t\t\t<div style="width:100%; text-align:center;">\r
\t\t\t\t<img src="images/remotes/ow_remote.png" width="135" height="183" usemap="#menuremote" border="0">\r
\t\t\t\t<map name="menuremote" >\r
\t\t\t\t\t<area shape="circle" coords="67,148,13" alt="ok" onclick="pressMenuRemote(\'352\');">\r
\t\t\t\t\t<area shape="circle" coords="68,173,9" alt="down" onclick="pressMenuRemote(\'108\');">\r
\t\t\t\t\t<area shape="circle" coords="44,148,9" alt="left" onclick="pressMenuRemote(\'105\');">\r
\t\t\t\t\t<area shape="circle" coords="92,147,9" alt="right" onclick="pressMenuRemote(\'106\');">\r
\t\t\t\t\t<area shape="circle" coords="68,126,8" alt="up" onclick="pressMenuRemote(\'103\');">\r
\t\t\t\t\t<area shape="circle" coords="117,163,10" alt="blue" onclick="pressMenuRemote(\'401\');">\r
\t\t\t\t\t<area shape="circle" coords="118,132,11" alt="yellow" onclick="pressMenuRemote(\'400\');">\r
\t\t\t\t\t<area shape="circle" coords="18,163,11" alt="green" onclick="pressMenuRemote(\'399\');">\r
\t\t\t\t\t<area shape="circle" coords="19,133,10" alt="red" onclick="pressMenuRemote(\'398\');">\r
\t\t\t\t\t<area shape="rect" coords="5,89,44,117" alt="menu" onclick="pressMenuRemote(\'139\');">\r
\t\t\t\t\t<area shape="rect" coords="90,89,128,117" alt="exit" onclick="pressMenuRemote(\'174\');">\r
\t\t\t\t\t<area shape="rect" coords="47,89,87,117" alt="0" onclick="pressMenuRemote(\'11\');">\r
\t\t\t\t\t<area shape="rect" coords="90,60,128,86" alt="9" onclick="pressMenuRemote(\'10\');">\r
\t\t\t\t\t<area shape="rect" coords="47,60,87,86" alt="8" onclick="pressMenuRemote(\'9\');">\r
\t\t\t\t\t<area shape="rect" coords="4,60,44,86" alt="7" onclick="pressMenuRemote(\'8\');">\r
\t\t\t\t\t<area shape="rect" coords="90,30,129,57" alt="6" onclick="pressMenuRemote(\'7\');">\r
\t\t\t\t\t<area shape="rect" coords="47,30,87,57" alt="5" onclick="pressMenuRemote(\'6\');">\r
\t\t\t\t\t<area shape="rect" coords="4,30,44,57" alt="4" onclick="pressMenuRemote(\'5\');">\r
\t\t\t\t\t<area shape="rect" coords="90,0,129,27" alt="3" onclick="pressMenuRemote(\'4\');">\r
\t\t\t\t\t<area shape="rect" coords="46,0,88,28" alt="2" onclick="pressMenuRemote(\'3\');">\r
\t\t\t\t\t<area shape="rect" coords="5,0,45,28" alt="1" onclick="pressMenuRemote(\'2\');">\r
\t\t\t\t</map>\r
\t\t\t\t<div id="help">\r
\t\t\t\t\t''')
_v = VFFSL(SL,"tstrings",True)['shiftforlong'] # u"$tstrings['shiftforlong']" on line 138, col 6
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['shiftforlong']")) # from line 138, col 6.
write(u'''\r
\t\t\t\t</div>\r
\t\t\t\t<ul>\r
''')
if VFFSL(SL,"remotegrabscreenshot",True): # generated from line 141, col 6
write(u'''\t\t\t\t\t<li><input type="checkbox" name="remotegrabscreen" checked="checked" />''')
_v = VFFSL(SL,"tstrings",True)['grabscreenshot'] # u"$tstrings['grabscreenshot']" on line 142, col 77
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['grabscreenshot']")) # from line 142, col 77.
write(u'''</li>\r
''')
else: # generated from line 143, col 6
write(u'''\t\t\t\t\t<li><input type="checkbox" name="remotegrabscreen" />''')
_v = VFFSL(SL,"tstrings",True)['grabscreenshot'] # u"$tstrings['grabscreenshot']" on line 144, col 59
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['grabscreenshot']")) # from line 144, col 59.
write(u'''</li>\r
''')
write(u'''\t\t\t\t\t<li><a href="#" onclick="toggleFullRemote(); return false;">''')
_v = VFFSL(SL,"tstrings",True)['showfullremote'] # u"$tstrings['showfullremote']" on line 146, col 66
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['showfullremote']")) # from line 146, col 66.
write(u'''</a></li>\r
\t\t\t\t</ul>\r
\t\t\t</div>\r
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def extrasMenu(self, **KWS):
## CHEETAH: generated from #def extrasMenu at line 151, col 4.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''\t\t\t<ul>\r
''')
for extra in VFFSL(SL,"extras",True): # generated from line 153, col 5
if VFN(VFFSL(SL,"extra",True)["key"],"endswith",False)('lcd4linux/config'): # generated from line 154, col 6
write(u"""\t\t\t\t\t\t<li><a href='""")
_v = VFFSL(SL,"extra",True)["key"] # u'$extra["key"]' on line 155, col 20
if _v is not None: write(_filter(_v, rawExpr=u'$extra["key"]')) # from line 155, col 20.
write(u"""' target='_blank'>""")
_v = VFFSL(SL,"extra",True)["description"] # u'$extra["description"]' on line 155, col 51
if _v is not None: write(_filter(_v, rawExpr=u'$extra["description"]')) # from line 155, col 51.
write(u'''</a></li>\r
''')
else: # generated from line 156, col 6
write(u'''\t\t\t\t\t\t<li><a href=\'#\' onclick="load_maincontent(\'''')
_v = VFFSL(SL,"extra",True)["key"] # u'$extra["key"]' on line 157, col 50
if _v is not None: write(_filter(_v, rawExpr=u'$extra["key"]')) # from line 157, col 50.
write(u'''\'); return false;">''')
_v = VFFSL(SL,"extra",True)["description"] # u'$extra["description"]' on line 157, col 82
if _v is not None: write(_filter(_v, rawExpr=u'$extra["description"]')) # from line 157, col 82.
write(u'''</a></li>\r
''')
write(u'''\t\t\t</ul>\r
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''\r
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\r
<html xmlns="http://www.w3.org/1999/xhtml">\r
<head>\r
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />\r
<link rel="shortcut icon" href="/images/favicon.png">\r
<link rel="stylesheet" type="text/css" href="/css/style.css" />\r
<link type="text/css" href="/css/jquery-ui-1.8.18.custom.css" rel="stylesheet" />\t\r
<script type="text/javascript" src="/js/jquery-1.6.2.min.js"></script>\r
<script type="text/javascript" src="/js/jquery-ui-1.8.18.custom.min.js"></script>\r
<script type="text/javascript" src="/js/openwebif.js"></script>\r
<script type="text/javascript" src="/js/transcoding.js"></script>\r
<script type="text/javascript">initJsTranslation(''')
_v = VFFSL(SL,"dumps",False)(VFFSL(SL,"tstrings",True)) # u'$dumps($tstrings)' on line 15, col 50
if _v is not None: write(_filter(_v, rawExpr=u'$dumps($tstrings)')) # from line 15, col 50.
write(u''')</script>\r
<title>Open Webif</title>\r
</head>\r
\r
<body>\r
\t<div id="container">\r
\t\t<div id="header">\r
\t\t\t<h1><a href="/">Open<span class="off">Webif</span></a></h1>\r
\t\t\t<h2>''')
_v = VFFSL(SL,"tstrings",True)['openwebif_header'] # u"$tstrings['openwebif_header']" on line 23, col 8
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['openwebif_header']")) # from line 23, col 8.
write(u'''</h2>\r
\t\t</div>\r
\t\t\r
\t\t<div id="statusheader">\r
\t\t\t<div id="osd">''')
_v = VFFSL(SL,"tstrings",True)['nothing_play'] # u"$tstrings['nothing_play']" on line 27, col 18
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['nothing_play']")) # from line 27, col 18.
write(u'''</div>\r
\t\t\t<div id="osd_status"></div>\r
\t\t\t<div id="osd_bottom"></div>\r
\t\t</div>\r
\t\t\r
\t\t<div id="dialog" title="Work in progress">\r
\t\t\t<p>Sorry, this function is not yet implemented.</p>\r
\t\t</div>\r
\t\t\r
\t\t<div id="leftmenu">\r
\t\t\r
\t\t\r
\r
\t\t\r
\t\t\r
\t\t\r
\t\t\t\r
\t\t\t\r
\t\t\r
\t\t\t<div id="menucontainer">\r
\t\t\t\t''')
_v = VFFSL(SL,"menu",False)(VFFSL(SL,"tstrings",True)['main'], "main", VFFSL(SL,"mainMenu",True)) # u'$menu($tstrings[\'main\'], "main", $mainMenu)' on line 164, col 5
if _v is not None: write(_filter(_v, rawExpr=u'$menu($tstrings[\'main\'], "main", $mainMenu)')) # from line 164, col 5.
write(u'''\r
\t\t\t\t''')
_v = VFFSL(SL,"menu",False)(VFFSL(SL,"tstrings",True)['volumecontrol'], "volume", VFFSL(SL,"volumeMenu",True)) # u'$menu($tstrings[\'volumecontrol\'], "volume", $volumeMenu)' on line 165, col 5
if _v is not None: write(_filter(_v, rawExpr=u'$menu($tstrings[\'volumecontrol\'], "volume", $volumeMenu)')) # from line 165, col 5.
write(u'''\r
\t\t\t\t''')
_v = VFFSL(SL,"menu",False)(VFFSL(SL,"tstrings",True)['boxcontrol'], "control", VFFSL(SL,"controlMenu",True)) # u'$menu($tstrings[\'boxcontrol\'], "control", $controlMenu)' on line 166, col 5
if _v is not None: write(_filter(_v, rawExpr=u'$menu($tstrings[\'boxcontrol\'], "control", $controlMenu)')) # from line 166, col 5.
write(u'''\r
\t\t\t\t''')
_v = VFFSL(SL,"menu",False)(VFFSL(SL,"tstrings",True)['remote'], "remote", VFFSL(SL,"remoteMenu",True)) # u'$menu($tstrings[\'remote\'], "remote", $remoteMenu)' on line 167, col 5
if _v is not None: write(_filter(_v, rawExpr=u'$menu($tstrings[\'remote\'], "remote", $remoteMenu)')) # from line 167, col 5.
write(u'''\r
\t\t\t\t''')
_v = VFFSL(SL,"menu",False)(VFFSL(SL,"tstrings",True)['info'], "info", VFFSL(SL,"infoMenu",True)) # u'$menu($tstrings[\'info\'], "info", $infoMenu)' on line 168, col 5
if _v is not None: write(_filter(_v, rawExpr=u'$menu($tstrings[\'info\'], "info", $infoMenu)')) # from line 168, col 5.
write(u'''\r
\t\t\t\t''')
_v = VFFSL(SL,"menu",False)(VFFSL(SL,"tstrings",True)['stream'], "stream", VFFSL(SL,"streamMenu",True)) # u'$menu($tstrings[\'stream\'], "stream", $streamMenu)' on line 169, col 5
if _v is not None: write(_filter(_v, rawExpr=u'$menu($tstrings[\'stream\'], "stream", $streamMenu)')) # from line 169, col 5.
write(u'''\r
\t\t\t\t''')
_v = VFFSL(SL,"menu",False)(VFFSL(SL,"tstrings",True)['extras'], "extras", VFFSL(SL,"extrasMenu",True)) # u'$menu($tstrings[\'extras\'], "extras", $extrasMenu)' on line 170, col 5
if _v is not None: write(_filter(_v, rawExpr=u'$menu($tstrings[\'extras\'], "extras", $extrasMenu)')) # from line 170, col 5.
write(u'''\r
\t\t\t\t''')
_v = VFFSL(SL,"menu",False)(VFFSL(SL,"tstrings",True)['epgsearch'], "search", VFFSL(SL,"searchMenu",True)) # u'$menu($tstrings[\'epgsearch\'], "search", $searchMenu)' on line 171, col 5
if _v is not None: write(_filter(_v, rawExpr=u'$menu($tstrings[\'epgsearch\'], "search", $searchMenu)')) # from line 171, col 5.
write(u'''\r
\t\t\t</div>\r
\t\t\t<div id="remotecontainer" style="display: none;">\r
\t\t\t\t<div id="leftmenu_main">\r
\t\t\t\t\t<div id="leftmenu_top">''')
_v = VFFSL(SL,"tstrings",True)['remote'] # u"$tstrings['remote']" on line 175, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['remote']")) # from line 175, col 29.
write(u'''</div>\r
\t\t\t\t\t<div style="width:100%; text-align:center;">\r
\t\t\t\t\t\t<div id="remote_container" style="width:100%; text-align:center;"></div>\r
\t\t\t\t\t\t<script type="text/javascript">\r
\t\t\t\t\t\t\t$(document).ready(function() {\r
\t\t\t\t\t\t\t\t$("#remote_container").load("/static/remotes/''')
_v = VFFSL(SL,"remote",True) # u'${remote}' on line 180, col 55
if _v is not None: write(_filter(_v, rawExpr=u'${remote}')) # from line 180, col 55.
write(u'''.html");\r
\t\t\t\t\t\t\t});\r
\t\t\t\t\t\t</script>\r
\t\t\t\t\t\t<div id="help">\r
\t\t\t\t\t\t\t''')
_v = VFFSL(SL,"tstrings",True)['shiftforlong'] # u"$tstrings['shiftforlong']" on line 184, col 8
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['shiftforlong']")) # from line 184, col 8.
write(u'''\r
\t\t\t\t\t\t</div>\r
\t\t\t\t\t\t<ul>\r
''')
if VFFSL(SL,"remotegrabscreenshot",True): # generated from line 187, col 8
write(u'''\t\t\t\t\t\t\t<li><input type="checkbox" name="remotegrabscreen" checked="checked" />''')
_v = VFFSL(SL,"tstrings",True)['grabscreenshot'] # u"$tstrings['grabscreenshot']" on line 188, col 79
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['grabscreenshot']")) # from line 188, col 79.
write(u'''</li>\r
''')
else: # generated from line 189, col 8
write(u'''\t\t\t\t\t\t\t<li><input type="checkbox" name="remotegrabscreen" />''')
_v = VFFSL(SL,"tstrings",True)['grabscreenshot'] # u"$tstrings['grabscreenshot']" on line 190, col 61
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['grabscreenshot']")) # from line 190, col 61.
write(u'''</li>\r
''')
write(u'''\t\t\t\t\t\t\t<li><a href="#" onclick="toggleFullRemote(); return false;" class="leftmenu_remotelink">''')
_v = VFFSL(SL,"tstrings",True)['hidefullremote'] # u"$tstrings['hidefullremote']" on line 192, col 96
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['hidefullremote']")) # from line 192, col 96.
write(u'''</a></li>\r
\t\t\t\t\t\t</ul>\r
\t\t\t\t\t</div>\r
\t\t\t\t</div>\r
\t\t\t</div>\r
\t\t</div>\r
\t\t\r
\t\t<div id="content">\r
\t\t\t<div id="content_container">\r
\t\t\t''')
_v = VFFSL(SL,"content",True) # u'$content' on line 201, col 4
if _v is not None: write(_filter(_v, rawExpr=u'$content')) # from line 201, col 4.
write(u'''\r
\t\t\t</div>\r
\t\t\t<div id="footer"><h3> <a href="https://github.com/E2OpenPlugins">E2OpenPlugins</a> | <a href="http://www.vuplus-community.net">Black Hole</a> | <a href="http://www.hdmedia-universe.com">HDMU</a> | <a href="http://openpli.org">OpenPli</a> | <a href="http://forum.sifteam.eu">Sif</a> | <a href="http://www.vuplus-support.org">VTi</a> | <a href="http://openspa.info">OpenSpa</a></h3></div>\r
\t\t</div>\r
\t</div>\r
\t<form name="portForm" action="/web/stream.m3u" method="GET" target="_blank">\r
\t\t<input type="hidden" name="ref">\r
\t\t<input type="hidden" name="name">\r
\t\t<input type="hidden" name="device">\r
\t</form>\r
\t<form name="portFormTs" action="/web/ts.m3u" method="GET" target="_blank">\r
\t\t<input type="hidden" name="file">\r
\t\t<input type="hidden" name="device">\r
\t</form>\r
</body>\r
\r
</html>\r
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_main = 'respond'
## END CLASS DEFINITION
if not hasattr(main, '_initCheetahAttributes'):
templateAPIClass = getattr(main, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(main)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=main()).run()
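# Illustrative note (an addition, not part of the generated file): a compiled
# Cheetah template class like `main` is normally rendered by instantiating it
# with a searchList of namespaces and calling its main method, e.g.:
#
#     tmpl = main(searchList=[{'tstrings': tstrings, 'extras': [],
#                              'remote': 'ow_remote', 'content': ''}])
#     html = tmpl.respond()
#
# `searchList` is standard Cheetah.Template plumbing; the keys shown mirror
# the placeholders this template resolves via VFFSL ($tstrings, $extras,
# $remote, $content, ...).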
|
MOA-2011/enigma2-plugin-extensions-openwebif
|
plugin/controllers/views/main.py
|
Python
|
gpl-2.0
| 34,105
|
[
"VisIt"
] |
dfe4e9e944dfae2a29c58c24adb6265dc99c36ce27f793148a775f4c1243be85
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2016 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
| Database (Hobza) of interaction energies for bimolecular complexes.
| Geometries from <Reference>.
| Reference interaction energies from Rezac and Hobza, JCTC (in press).
- **cp** ``'off'`` <erase this comment and after unless on is a valid option> || ``'on'``
- **rlxd** ``'off'`` <erase this comment and after unless on is valid option> || ``'on'``
- **benchmark**
- ``'<benchmark_name>'`` <Reference>.
- |dl| ``'<default_benchmark_name>'`` |dr| <Reference>.
- **subset**
- ``'small'`` <members_description>
- ``'large'`` <members_description>
- ``'<subset>'`` <members_description>
"""
import re
import qcdb
# <<< A24 Database Module >>>
dbse = 'A24'
# <<< Database Members >>>
HRXN = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]
HRXN_SM = []
HRXN_LG = []
# <<< Chemical Systems Involved >>>
RXNM = {} # reaction matrix of reagent contributions per reaction
ACTV = {} # order of active reagents per reaction
ACTV_CP = {} # order of active reagents per counterpoise-corrected reaction
ACTV_SA = {} # order of active reagents for non-supermolecular calculations
for rxn in HRXN:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -1,
'%s-%s-monoB-CP' % (dbse, rxn) : -1,
'%s-%s-monoA-unCP' % (dbse, rxn) : -1,
'%s-%s-monoB-unCP' % (dbse, rxn) : -1 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn),
'%s-%s-monoB-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-unCP' % (dbse, rxn),
'%s-%s-monoB-unCP' % (dbse, rxn) ]
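# Illustrative helper (an addition, not part of the original database): the
# RXNM coefficients weight per-reagent energies into a reaction energy, so the
# counterpoise-corrected interaction energy of reaction `rxn` comes out as
# E(dimer) - E(monoA-CP) - E(monoB-CP). `energies` maps reagent labels to
# computed total energies.
def _example_interaction_energy(rxn, energies, actv=None):
    reagents = (actv if actv is not None else ACTV_CP)['%s-%s' % (dbse, rxn)]
    return sum(RXNM['%s-%s' % (dbse, rxn)][rgt] * energies[rgt]
               for rgt in reagents)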
# <<< Reference Values [kcal/mol] from Rezac and Hobza dx.doi.org/10.1021/ct400057w >>>
BIND = {}
BIND['%s-%s' % (dbse, 1 )] = -6.524
BIND['%s-%s' % (dbse, 2 )] = -5.014
BIND['%s-%s' % (dbse, 3 )] = -4.749
BIND['%s-%s' % (dbse, 4 )] = -4.572
BIND['%s-%s' % (dbse, 5 )] = -3.157
BIND['%s-%s' % (dbse, 6 )] = -1.679
BIND['%s-%s' % (dbse, 7 )] = -0.779
BIND['%s-%s' % (dbse, 8 )] = -0.672
BIND['%s-%s' % (dbse, 9 )] = -4.474
BIND['%s-%s' % (dbse, 10 )] = -2.578
BIND['%s-%s' % (dbse, 11 )] = -1.629
BIND['%s-%s' % (dbse, 12 )] = -1.537
BIND['%s-%s' % (dbse, 13 )] = -1.389
BIND['%s-%s' % (dbse, 14 )] = -1.110
BIND['%s-%s' % (dbse, 15 )] = -0.514
BIND['%s-%s' % (dbse, 16 )] = -1.518
BIND['%s-%s' % (dbse, 17 )] = -0.837
BIND['%s-%s' % (dbse, 18 )] = -0.615
BIND['%s-%s' % (dbse, 19 )] = -0.538
BIND['%s-%s' % (dbse, 20 )] = -0.408
BIND['%s-%s' % (dbse, 21 )] = -0.370
BIND['%s-%s' % (dbse, 22 )] = 0.784
BIND['%s-%s' % (dbse, 23 )] = 0.897
BIND['%s-%s' % (dbse, 24 )] = 1.075
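# Illustrative sketch (hypothetical numbers, not part of the original file):
# given computed interaction energies keyed like BIND, the mean unsigned error
# against the reference values above is a one-liner:
#
#     computed = {'%s-%s' % (dbse, 1): -6.40}  # made-up value
#     mue = sum(abs(computed[k] - BIND[k]) for k in computed) / len(computed)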
# <<< Comment Lines >>>
TAGL = {}
TAGL['%s-%s' % (dbse, 1)] = """ water_ammonia_Cs """
TAGL['%s-%s-dimer' % (dbse, 1)] = """Dimer from water_ammonia_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 1)] = """Monomer A from water_ammonia_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 1)] = """Monomer B from water_ammonia_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 1)] = """Monomer A from water_ammonia_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 1)] = """Monomer B from water_ammonia_Cs """
TAGL['%s-%s' % (dbse, 2)] = """ water_water_Cs """
TAGL['%s-%s-dimer' % (dbse, 2)] = """Dimer from water_water_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 2)] = """Monomer A from water_water_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 2)] = """Monomer B from water_water_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 2)] = """Monomer A from water_water_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 2)] = """Monomer B from water_water_Cs """
TAGL['%s-%s' % (dbse, 3)] = """ HCN_HCN_Cxv """
TAGL['%s-%s-dimer' % (dbse, 3)] = """Dimer from HCN_HCN_Cxv """
TAGL['%s-%s-monoA-CP' % (dbse, 3)] = """Monomer A from HCN_HCN_Cxv """
TAGL['%s-%s-monoB-CP' % (dbse, 3)] = """Monomer B from HCN_HCN_Cxv """
TAGL['%s-%s-monoA-unCP' % (dbse, 3)] = """Monomer A from HCN_HCN_Cxv """
TAGL['%s-%s-monoB-unCP' % (dbse, 3)] = """Monomer B from HCN_HCN_Cxv """
TAGL['%s-%s' % (dbse, 4)] = """ HF_HF_Cs """
TAGL['%s-%s-dimer' % (dbse, 4)] = """Dimer from HF_HF_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 4)] = """Monomer A from HF_HF_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 4)] = """Monomer B from HF_HF_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 4)] = """Monomer A from HF_HF_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 4)] = """Monomer B from HF_HF_Cs """
TAGL['%s-%s' % (dbse, 5)] = """ ammonia_ammonia_C2h """
TAGL['%s-%s-dimer' % (dbse, 5)] = """Dimer from ammonia_ammonia_C2h """
TAGL['%s-%s-monoA-CP' % (dbse, 5)] = """Monomer A from ammonia_ammonia_C2h """
TAGL['%s-%s-monoB-CP' % (dbse, 5)] = """Monomer B from ammonia_ammonia_C2h """
TAGL['%s-%s-monoA-unCP' % (dbse, 5)] = """Monomer A from ammonia_ammonia_C2h """
TAGL['%s-%s-monoB-unCP' % (dbse, 5)] = """Monomer B from ammonia_ammonia_C2h """
TAGL['%s-%s' % (dbse, 6)] = """ methane_HF_C3v """
TAGL['%s-%s-dimer' % (dbse, 6)] = """Dimer from methane_HF_C3v """
TAGL['%s-%s-monoA-CP' % (dbse, 6)] = """Monomer A from methane_HF_C3v """
TAGL['%s-%s-monoB-CP' % (dbse, 6)] = """Monomer B from methane_HF_C3v """
TAGL['%s-%s-monoA-unCP' % (dbse, 6)] = """Monomer A from methane_HF_C3v """
TAGL['%s-%s-monoB-unCP' % (dbse, 6)] = """Monomer B from methane_HF_C3v """
TAGL['%s-%s' % (dbse, 7)] = """ ammonia_methane_C3v """
TAGL['%s-%s-dimer' % (dbse, 7)] = """Dimer from ammonia_methane_C3v """
TAGL['%s-%s-monoA-CP' % (dbse, 7)] = """Monomer A from ammonia_methane_C3v """
TAGL['%s-%s-monoB-CP' % (dbse, 7)] = """Monomer B from ammonia_methane_C3v """
TAGL['%s-%s-monoA-unCP' % (dbse, 7)] = """Monomer A from ammonia_methane_C3v """
TAGL['%s-%s-monoB-unCP' % (dbse, 7)] = """Monomer B from ammonia_methane_C3v """
TAGL['%s-%s' % (dbse, 8)] = """ methane_water_Cs """
TAGL['%s-%s-dimer' % (dbse, 8)] = """Dimer from methane_water_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 8)] = """Monomer A from methane_water_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 8)] = """Monomer B from methane_water_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 8)] = """Monomer A from methane_water_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 8)] = """Monomer B from methane_water_Cs """
TAGL['%s-%s' % (dbse, 9)] = """ formaldehyde_formaldehyde_Cs """
TAGL['%s-%s-dimer' % (dbse, 9)] = """Dimer from formaldehyde_formaldehyde_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 9)] = """Monomer A from formaldehyde_formaldehyde_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 9)] = """Monomer B from formaldehyde_formaldehyde_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 9)] = """Monomer A from formaldehyde_formaldehyde_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 9)] = """Monomer B from formaldehyde_formaldehyde_Cs """
TAGL['%s-%s' % (dbse, 10)] = """ ethene_wat_Cs """
TAGL['%s-%s-dimer' % (dbse, 10)] = """Dimer from ethene_wat_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 10)] = """Monomer A from ethene_wat_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 10)] = """Monomer B from ethene_wat_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 10)] = """Monomer A from ethene_wat_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 10)] = """Monomer B from ethene_wat_Cs """
TAGL['%s-%s' % (dbse, 11)] = """ ethene_formaldehyde_Cs """
TAGL['%s-%s-dimer' % (dbse, 11)] = """Dimer from ethene_formaldehyde_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 11)] = """Monomer A from ethene_formaldehyde_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 11)] = """Monomer B from ethene_formaldehyde_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 11)] = """Monomer A from ethene_formaldehyde_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 11)] = """Monomer B from ethene_formaldehyde_Cs """
TAGL['%s-%s' % (dbse, 12)] = """ ethyne_ethyne_C2v """
TAGL['%s-%s-dimer' % (dbse, 12)] = """Dimer from ethyne_ethyne_C2v """
TAGL['%s-%s-monoA-CP' % (dbse, 12)] = """Monomer A from ethyne_ethyne_C2v """
TAGL['%s-%s-monoB-CP' % (dbse, 12)] = """Monomer B from ethyne_ethyne_C2v """
TAGL['%s-%s-monoA-unCP' % (dbse, 12)] = """Monomer A from ethyne_ethyne_C2v """
TAGL['%s-%s-monoB-unCP' % (dbse, 12)] = """Monomer B from ethyne_ethyne_C2v """
TAGL['%s-%s' % (dbse, 13)] = """ ethene_ammonia_Cs """
TAGL['%s-%s-dimer' % (dbse, 13)] = """Dimer from ethene_ammonia_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 13)] = """Monomer A from ethene_ammonia_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 13)] = """Monomer B from ethene_ammonia_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 13)] = """Monomer A from ethene_ammonia_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 13)] = """Monomer B from ethene_ammonia_Cs """
TAGL['%s-%s' % (dbse, 14)] = """ ethene_ethene_C2v """
TAGL['%s-%s-dimer' % (dbse, 14)] = """Dimer from ethene_ethene_C2v """
TAGL['%s-%s-monoA-CP' % (dbse, 14)] = """Monomer A from ethene_ethene_C2v """
TAGL['%s-%s-monoB-CP' % (dbse, 14)] = """Monomer B from ethene_ethene_C2v """
TAGL['%s-%s-monoA-unCP' % (dbse, 14)] = """Monomer A from ethene_ethene_C2v """
TAGL['%s-%s-monoB-unCP' % (dbse, 14)] = """Monomer B from ethene_ethene_C2v """
TAGL['%s-%s' % (dbse, 15)] = """ methane_ethene_Cs """
TAGL['%s-%s-dimer' % (dbse, 15)] = """Dimer from methane_ethene_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 15)] = """Monomer A from methane_ethene_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 15)] = """Monomer B from methane_ethene_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 15)] = """Monomer A from methane_ethene_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 15)] = """Monomer B from methane_ethene_Cs """
TAGL['%s-%s' % (dbse, 16)] = """ borane_methane_Cs """
TAGL['%s-%s-dimer' % (dbse, 16)] = """Dimer from borane_methane_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 16)] = """Monomer A from borane_methane_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 16)] = """Monomer B from borane_methane_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 16)] = """Monomer A from borane_methane_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 16)] = """Monomer B from borane_methane_Cs """
TAGL['%s-%s' % (dbse, 17)] = """ methane_ethane_Cs """
TAGL['%s-%s-dimer' % (dbse, 17)] = """Dimer from methane_ethane_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 17)] = """Monomer A from methane_ethane_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 17)] = """Monomer B from methane_ethane_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 17)] = """Monomer A from methane_ethane_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 17)] = """Monomer B from methane_ethane_Cs """
TAGL['%s-%s' % (dbse, 18)] = """ methane_ethane_C3 """
TAGL['%s-%s-dimer' % (dbse, 18)] = """Dimer from methane_ethane_C3 """
TAGL['%s-%s-monoA-CP' % (dbse, 18)] = """Monomer A from methane_ethane_C3 """
TAGL['%s-%s-monoB-CP' % (dbse, 18)] = """Monomer B from methane_ethane_C3 """
TAGL['%s-%s-monoA-unCP' % (dbse, 18)] = """Monomer A from methane_ethane_C3 """
TAGL['%s-%s-monoB-unCP' % (dbse, 18)] = """Monomer B from methane_ethane_C3 """
TAGL['%s-%s' % (dbse, 19)] = """ methane_methane_D3d """
TAGL['%s-%s-dimer' % (dbse, 19)] = """Dimer from methane_methane_D3d """
TAGL['%s-%s-monoA-CP' % (dbse, 19)] = """Monomer A from methane_methane_D3d """
TAGL['%s-%s-monoB-CP' % (dbse, 19)] = """Monomer B from methane_methane_D3d """
TAGL['%s-%s-monoA-unCP' % (dbse, 19)] = """Monomer A from methane_methane_D3d """
TAGL['%s-%s-monoB-unCP' % (dbse, 19)] = """Monomer B from methane_methane_D3d """
TAGL['%s-%s' % (dbse, 20)] = """ methane_Ar_C3v """
TAGL['%s-%s-dimer' % (dbse, 20)] = """Dimer from methane_Ar_C3v """
TAGL['%s-%s-monoA-CP' % (dbse, 20)] = """Monomer A from methane_Ar_C3v """
TAGL['%s-%s-monoB-CP' % (dbse, 20)] = """Monomer B from methane_Ar_C3v """
TAGL['%s-%s-monoA-unCP' % (dbse, 20)] = """Monomer A from methane_Ar_C3v """
TAGL['%s-%s-monoB-unCP' % (dbse, 20)] = """Monomer B from methane_Ar_C3v """
TAGL['%s-%s' % (dbse, 21)] = """ ethene_Ar_C2v """
TAGL['%s-%s-dimer' % (dbse, 21)] = """Dimer from ethene_Ar_C2v """
TAGL['%s-%s-monoA-CP' % (dbse, 21)] = """Monomer A from ethene_Ar_C2v """
TAGL['%s-%s-monoB-CP' % (dbse, 21)] = """Monomer B from ethene_Ar_C2v """
TAGL['%s-%s-monoA-unCP' % (dbse, 21)] = """Monomer A from ethene_Ar_C2v """
TAGL['%s-%s-monoB-unCP' % (dbse, 21)] = """Monomer B from ethene_Ar_C2v """
TAGL['%s-%s' % (dbse, 22)] = """ ethene_ethyne_C2v """
TAGL['%s-%s-dimer' % (dbse, 22)] = """Dimer from ethene_ethyne_C2v """
TAGL['%s-%s-monoA-CP' % (dbse, 22)] = """Monomer A from ethene_ethyne_C2v """
TAGL['%s-%s-monoB-CP' % (dbse, 22)] = """Monomer B from ethene_ethyne_C2v """
TAGL['%s-%s-monoA-unCP' % (dbse, 22)] = """Monomer A from ethene_ethyne_C2v """
TAGL['%s-%s-monoB-unCP' % (dbse, 22)] = """Monomer B from ethene_ethyne_C2v """
TAGL['%s-%s' % (dbse, 23)] = """ ethene_ethene_D2h """
TAGL['%s-%s-dimer' % (dbse, 23)] = """Dimer from ethene_ethene_D2h """
TAGL['%s-%s-monoA-CP' % (dbse, 23)] = """Monomer A from ethene_ethene_D2h """
TAGL['%s-%s-monoB-CP' % (dbse, 23)] = """Monomer B from ethene_ethene_D2h """
TAGL['%s-%s-monoA-unCP' % (dbse, 23)] = """Monomer A from ethene_ethene_D2h """
TAGL['%s-%s-monoB-unCP' % (dbse, 23)] = """Monomer B from ethene_ethene_D2h """
TAGL['%s-%s' % (dbse, 24)] = """ ethyne_ethyne_D2h """
TAGL['%s-%s-dimer' % (dbse, 24)] = """Dimer from ethyne_ethyne_D2h """
TAGL['%s-%s-monoA-CP' % (dbse, 24)] = """Monomer A from ethyne_ethyne_D2h """
TAGL['%s-%s-monoB-CP' % (dbse, 24)] = """Monomer B from ethyne_ethyne_D2h """
TAGL['%s-%s-monoA-unCP' % (dbse, 24)] = """Monomer A from ethyne_ethyne_D2h """
TAGL['%s-%s-monoB-unCP' % (dbse, 24)] = """Monomer B from ethyne_ethyne_D2h """
# <<< Geometry Specification Strings >>>
GEOS = {}
GEOS['%s-%s-dimer' % (dbse, '1')] = qcdb.Molecule("""
0 1
O 0.00000000 -0.05786571 -1.47979303
H 0.00000000 0.82293384 -1.85541474
H 0.00000000 0.07949567 -0.51934253
--
0 1
N 0.00000000 0.01436394 1.46454628
H 0.00000000 -0.98104857 1.65344779
H -0.81348351 0.39876776 1.92934049
H 0.81348351 0.39876776 1.92934049
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '2')] = qcdb.Molecule("""
0 1
O -0.06699914 0.00000000 1.49435474
H 0.81573427 0.00000000 1.86586639
H 0.06885510 0.00000000 0.53914277
--
0 1
O 0.06254775 0.00000000 -1.42263208
H -0.40696540 -0.76017841 -1.77174450
H -0.40696540 0.76017841 -1.77174450
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '3')] = qcdb.Molecule("""
0 1
H 0.00000000 0.00000000 3.85521306
C 0.00000000 0.00000000 2.78649976
N 0.00000000 0.00000000 1.63150791
--
0 1
H 0.00000000 0.00000000 -0.59377492
C 0.00000000 0.00000000 -1.66809824
N 0.00000000 0.00000000 -2.82525056
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '4')] = qcdb.Molecule("""
0 1
H 0.00000000 0.80267982 1.69529329
F 0.00000000 -0.04596666 1.34034818
--
0 1
H 0.00000000 -0.12040787 -0.49082840
F 0.00000000 0.00976945 -1.40424978
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '5')] = qcdb.Molecule("""
0 1
N -0.04998129 -1.58709323 0.00000000
H 0.12296265 -2.16846018 0.81105976
H 0.12296265 -2.16846018 -0.81105976
H 0.65988580 -0.86235298 0.00000000
--
0 1
N 0.04998129 1.58709323 0.00000000
H -0.12296265 2.16846018 0.81105976
H -0.65988580 0.86235298 0.00000000
H -0.12296265 2.16846018 -0.81105976
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '6')] = qcdb.Molecule("""
0 1
C 0.00000000 -0.00000000 1.77071609
H 0.51593378 -0.89362352 1.42025061
H -0.00000000 0.00000000 2.85805859
H 0.51593378 0.89362352 1.42025061
H -1.03186756 0.00000000 1.42025061
--
0 1
H -0.00000000 0.00000000 -0.54877328
F -0.00000000 0.00000000 -1.46803256
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '7')] = qcdb.Molecule("""
0 1
N -0.00000000 0.00000000 1.84833659
H 0.93730979 -0.00000000 2.23206741
H -0.46865489 -0.81173409 2.23206741
H -0.46865489 0.81173409 2.23206741
--
0 1
H 0.00000000 -0.00000000 -0.94497174
C 0.00000000 -0.00000000 -2.03363752
H 0.51251439 0.88770096 -2.40095125
H 0.51251439 -0.88770096 -2.40095125
H -1.02502878 0.00000000 -2.40095125
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '8')] = qcdb.Molecule("""
0 1
C 0.00069016 0.00000000 -1.99985520
H -0.50741740 0.88759452 -2.37290605
H 1.03052749 0.00000000 -2.35282982
H -0.01314396 0.00000000 -0.91190852
H -0.50741740 -0.88759452 -2.37290605
--
0 1
O -0.00472553 0.00000000 1.71597466
H 0.03211863 0.75755459 2.30172044
H 0.03211863 -0.75755459 2.30172044
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '9')] = qcdb.Molecule("""
0 1
C 0.00000000 0.60123980 -1.35383976
O 0.00000000 -0.59301814 -1.55209021
H 0.93542250 1.17427624 -1.26515132
H -0.93542250 1.17427624 -1.26515132
--
0 1
C 0.00000000 -0.60200476 1.55228866
O 0.00000000 0.59238638 1.35511328
H 0.00000000 -1.00937982 2.57524635
H 0.00000000 -1.32002906 0.71694997
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '10')] = qcdb.Molecule("""
0 1
C 0.01058825 -0.66806246 1.29820809
C 0.01058825 0.66806246 1.29820809
H 0.86863216 1.23267933 0.95426815
H -0.84608285 1.23258495 1.64525385
H -0.84608285 -1.23258495 1.64525385
H 0.86863216 -1.23267933 0.95426815
--
0 1
H -0.79685627 0.00000000 -2.50911038
O 0.04347445 0.00000000 -2.04834054
H -0.19067546 0.00000000 -1.11576944
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '11')] = qcdb.Molecule("""
0 1
C 0.00000000 -0.59797089 1.47742864
C 0.00000000 0.42131196 2.33957848
H 0.92113351 -1.02957102 1.10653516
H -0.92113351 -1.02957102 1.10653516
H -0.92393815 0.85124826 2.70694633
H 0.92393815 0.85124826 2.70694633
--
0 1
O 0.00000000 -0.51877334 -1.82845679
C 0.00000000 0.68616220 -1.73709412
H 0.00000000 1.33077474 -2.63186355
H 0.00000000 1.18902807 -0.75645498
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '12')] = qcdb.Molecule("""
0 1
C 0.00000000 0.60356400 -2.18173438
H 0.00000000 1.66847581 -2.18429610
C 0.00000000 -0.60356400 -2.18173438
H 0.00000000 -1.66847581 -2.18429610
--
0 1
C -0.00000000 0.00000000 1.57829513
H -0.00000000 0.00000000 0.51136193
C -0.00000000 0.00000000 2.78576543
H -0.00000000 0.00000000 3.85017859
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '13')] = qcdb.Molecule("""
0 1
C 0.00000000 -0.59662248 1.58722206
C 0.00000000 0.68258238 1.20494642
H 0.92312147 1.22423658 1.04062463
H -0.92312147 1.22423658 1.04062463
H -0.92388993 -1.13738548 1.75121281
H 0.92388993 -1.13738548 1.75121281
--
0 1
N 0.00000000 -0.00401379 -2.31096701
H -0.81122549 -0.45983060 -2.71043881
H 0.00000000 -0.22249432 -1.32128161
H 0.81122549 -0.45983060 -2.71043881
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '14')] = qcdb.Molecule("""
0 1
H 0.92444510 -1.23172221 -1.90619313
H -0.92444510 -1.23172221 -1.90619313
H -0.92444510 1.23172221 -1.90619313
H 0.92444510 1.23172221 -1.90619313
C 0.00000000 0.66728778 -1.90556520
C 0.00000000 -0.66728778 -1.90556520
--
0 1
H -0.00000000 1.23344948 2.82931792
H 0.00000000 1.22547148 0.97776199
H -0.00000000 -1.22547148 0.97776199
H -0.00000000 -1.23344948 2.82931792
C -0.00000000 -0.66711698 1.90601042
C -0.00000000 0.66711698 1.90601042
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '15')] = qcdb.Molecule("""
0 1
C 0.00000000 0.64634385 -1.60849815
C 0.00000000 -0.67914355 -1.45381675
H -0.92399961 -1.24016223 -1.38784883
H 0.92399961 -1.24016223 -1.38784883
H 0.92403607 1.20737602 -1.67357285
H -0.92403607 1.20737602 -1.67357285
--
0 1
H 0.00000000 0.08295411 1.59016711
C 0.00000000 0.02871509 2.67711785
H 0.88825459 0.52261990 3.06664029
H -0.88825459 0.52261990 3.06664029
H 0.00000000 -1.01394800 2.98955227
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '16')] = qcdb.Molecule("""
0 1
C 0.00346000 0.00000000 1.38045208
H 0.84849635 0.00000000 0.68958651
H 0.39513333 0.00000000 2.39584935
H -0.60268447 -0.88994299 1.22482674
H -0.60268447 0.88994299 1.22482674
--
0 1
B -0.00555317 0.00000000 -1.59887976
H 0.58455128 -1.03051800 -1.67949525
H 0.58455128 1.03051800 -1.67949525
H -1.18903148 0.00000000 -1.47677217
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '17')] = qcdb.Molecule("""
0 1
C 0.00000000 -0.06374421 2.42054090
H 0.00000000 1.02169396 2.34238038
H 0.88828307 -0.46131911 1.93307194
H -0.88828307 -0.46131911 1.93307194
H 0.00000000 -0.35363606 3.46945195
--
0 1
C 0.00000000 0.78133572 -1.13543912
H 0.00000000 1.37465349 -2.05114442
H -0.88043002 1.06310554 -0.55580918
C 0.00000000 -0.71332890 -1.44723686
H 0.88043002 1.06310554 -0.55580918
H 0.00000000 -1.30641812 -0.53140693
H -0.88100343 -0.99533072 -2.02587154
H 0.88100343 -0.99533072 -2.02587154
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '18')] = qcdb.Molecule("""
0 1
C -0.00000000 0.00000000 -2.85810471
H 0.39304720 -0.94712229 -2.49369739
H 0.62370837 0.81395000 -2.49369739
H -1.01675556 0.13317229 -2.49369739
H 0.00000000 -0.00000000 -3.94634214
--
0 1
C 0.00000000 -0.00000000 0.76143405
C -0.00000000 -0.00000000 2.28821715
H -0.61711193 -0.80824397 0.36571527
H -0.39140385 0.93855659 0.36571527
H 1.00851577 -0.13031262 0.36571527
H -1.00891703 0.13031295 2.68258296
H 0.39160418 -0.93890425 2.68258296
H 0.61731284 0.80859130 2.68258296
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '19')] = qcdb.Molecule("""
0 1
C -0.00000000 0.00000000 1.81901457
H 0.51274115 0.88809373 1.45476743
H 0.51274115 -0.88809373 1.45476743
H -1.02548230 0.00000000 1.45476743
H 0.00000000 -0.00000000 2.90722072
--
0 1
C 0.00000000 -0.00000000 -1.81901457
H -0.00000000 0.00000000 -2.90722072
H -0.51274115 0.88809373 -1.45476743
H -0.51274115 -0.88809373 -1.45476743
H 1.02548230 -0.00000000 -1.45476743
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '20')] = qcdb.Molecule("""
0 1
C -0.00000000 0.00000000 -2.62458428
H 0.51286762 0.88831278 -2.26110195
H 0.51286762 -0.88831278 -2.26110195
H -0.00000000 0.00000000 -3.71273928
H -1.02573525 0.00000000 -2.26110195
--
0 1
AR -0.00000000 0.00000000 1.05395172
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '21')] = qcdb.Molecule("""
0 1
C 0.00000000 0.66718073 -2.29024825
C 0.00000000 -0.66718073 -2.29024825
H -0.92400768 1.23202333 -2.28975239
H 0.92400768 1.23202333 -2.28975239
H -0.92400768 -1.23202333 -2.28975239
H 0.92400768 -1.23202333 -2.28975239
--
0 1
AR -0.00000000 0.00000000 1.60829261
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '22')] = qcdb.Molecule("""
0 1
H -0.92396100 1.23195600 -1.68478123
H 0.92396100 1.23195600 -1.68478123
H 0.92396100 -1.23195600 -1.68478123
H -0.92396100 -1.23195600 -1.68478123
C 0.00000000 0.66717600 -1.68478123
C 0.00000000 -0.66717600 -1.68478123
--
0 1
H -0.00000000 -1.66786500 1.81521877
H -0.00000000 1.66786500 1.81521877
C -0.00000000 -0.60339700 1.81521877
C -0.00000000 0.60339700 1.81521877
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '23')] = qcdb.Molecule("""
0 1
H -0.92396100 1.23195600 -1.75000000
H 0.92396100 1.23195600 -1.75000000
H 0.92396100 -1.23195600 -1.75000000
H -0.92396100 -1.23195600 -1.75000000
C 0.00000000 0.66717600 -1.75000000
C -0.00000000 -0.66717600 -1.75000000
--
0 1
H -0.92396100 1.23195600 1.75000000
H 0.92396100 1.23195600 1.75000000
H 0.92396100 -1.23195600 1.75000000
H -0.92396100 -1.23195600 1.75000000
C 0.00000000 0.66717600 1.75000000
C -0.00000000 -0.66717600 1.75000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '24')] = qcdb.Molecule("""
0 1
H -0.00000000 -1.66786500 -1.75000000
H 0.00000000 1.66786500 -1.75000000
C -0.00000000 -0.60339700 -1.75000000
C 0.00000000 0.60339700 -1.75000000
--
0 1
H -0.00000000 -1.66786500 1.75000000
H 0.00000000 1.66786500 1.75000000
C -0.00000000 -0.60339700 1.75000000
C 0.00000000 0.60339700 1.75000000
units angstrom
""")
# <<< Derived Geometry Strings >>>
for rxn in HRXN:
GEOS['%s-%s-monoA-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1)
GEOS['%s-%s-monoB-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2)
GEOS['%s-%s-monoA-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1, 2)
GEOS['%s-%s-monoB-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2, 1)
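# Illustrative usage (an addition, guarded so that importing the database
# module stays side-effect free): reaction labels index straight into TAGL,
# BIND, and GEOS.
if __name__ == '__main__':
    for rxn in HRXN[:3]:
        label = '%s-%s' % (dbse, rxn)
        print('%s: %s (% .3f kcal/mol)' % (label, TAGL[label].strip(), BIND[label]))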
|
kannon92/psi4
|
psi4/share/psi4/databases/A24.py
|
Python
|
gpl-2.0
| 29,724
|
[
"Psi4"
] |
759901dea91fc61c67a8e6da7d15c5a3cd6efe078b2fd1189b455b37ffc31d27
|
__author__ = 'crystal'
import sys
from PyQt5.QtWidgets import QApplication, QDialog
from checkSumGui import Ui_Form
if __name__ == '__main__':
    # Build the Qt application, attach the generated checksum UI to a plain
    # dialog, show it, and hand control to the Qt event loop.
    app = QApplication(sys.argv)
    window = QDialog()
    ui = Ui_Form()
    ui.setupUi(window)
    window.show()
    sys.exit(app.exec_())
|
karcio/checkSumValidatorGUI
|
checkSumVal/src/checkSum.py
|
Python
|
gpl-3.0
| 238
|
[
"CRYSTAL"
] |
f1cb50c396c6a3175ee819b6428fbf76b625d9c637c97a8b70526e9fb1641130
|
from numpy.testing import assert_equal, assert_array_equal, assert_allclose
from nose.tools import assert_true, assert_raises, assert_not_equal
from copy import deepcopy
import os.path as op
import numpy as np
from scipy import sparse
import os
import warnings
from mne import read_evokeds
from mne.datasets import testing
from mne.externals.six.moves import StringIO
from mne.io import show_fiff, read_raw_fif
from mne.epochs import _segment_raw
from mne.time_frequency import tfr_morlet
from mne.utils import (set_log_level, set_log_file, _TempDir,
get_config, set_config, deprecated, _fetch_file,
sum_squared, estimate_rank,
_url_to_local_path, sizeof_fmt, _check_subject,
_check_type_picks, object_hash, object_diff,
requires_good_network, run_tests_if_main, md5sum,
ArgvSetter, _memory_usage, check_random_state,
_check_mayavi_version, requires_mayavi,
set_memmap_min_size, _get_stim_channel, _check_fname,
create_slices, _time_mask, random_permutation,
_get_call_line, compute_corr, sys_info, verbose,
check_fname, requires_ftp, get_config_path,
object_size, buggy_mkl_svd, _get_inst_data,
copy_doc, copy_function_doc_to_method_doc)
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
fname_evoked = op.join(base_dir, 'test-ave.fif')
fname_raw = op.join(base_dir, 'test_raw.fif')
fname_log = op.join(base_dir, 'test-ave.log')
fname_log_2 = op.join(base_dir, 'test-ave-2.log')
data_path = testing.data_path(download=False)
fname_fsaverage_trans = op.join(data_path, 'subjects', 'fsaverage', 'bem',
'fsaverage-trans.fif')
def clean_lines(lines=()):
    """Scrub file names so logging output can be compared (see test_logging)."""
    return [l if 'Reading ' not in l else 'Reading test file' for l in lines]
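# For example (illustrative, not in the original tests):
#     clean_lines(['Reading /tmp/raw.fif...\n', 'done\n'])
#     -> ['Reading test file', 'done\n']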
def test_buggy_mkl():
"""Test decorator for buggy MKL issues"""
from nose.plugins.skip import SkipTest
@buggy_mkl_svd
def foo(a, b):
raise np.linalg.LinAlgError('SVD did not converge')
with warnings.catch_warnings(record=True) as w:
assert_raises(SkipTest, foo, 1, 2)
assert_true(all('convergence error' in str(ww.message) for ww in w))
@buggy_mkl_svd
def bar(c, d, e):
raise RuntimeError('SVD did not converge')
assert_raises(RuntimeError, bar, 1, 2, 3)
def test_sys_info():
"""Test info-showing utility
"""
out = StringIO()
sys_info(fid=out)
out = out.getvalue()
assert_true('numpy:' in out)
def test_get_call_line():
"""Test getting a call line
"""
@verbose
def foo(verbose=None):
return _get_call_line(in_verbose=True)
for v in (None, True):
my_line = foo(verbose=v) # testing
assert_equal(my_line, 'my_line = foo(verbose=v) # testing')
def bar():
return _get_call_line(in_verbose=False)
my_line = bar() # testing more
assert_equal(my_line, 'my_line = bar() # testing more')
def test_object_size():
"""Test object size estimation"""
assert_true(object_size(np.ones(10, np.float32)) <
object_size(np.ones(10, np.float64)))
for lower, upper, obj in ((0, 60, ''),
(0, 30, 1),
(0, 30, 1.),
(0, 60, 'foo'),
(0, 150, np.ones(0)),
(0, 150, np.int32(1)),
(150, 500, np.ones(20)),
(100, 400, dict()),
(400, 1000, dict(a=np.ones(50))),
(200, 900, sparse.eye(20, format='csc')),
(200, 900, sparse.eye(20, format='csr'))):
size = object_size(obj)
assert_true(lower < size < upper,
msg='%s < %s < %s:\n%s' % (lower, size, upper, obj))
def test_get_inst_data():
"""Test _get_inst_data"""
raw = read_raw_fif(fname_raw, add_eeg_ref=False)
raw.crop(tmax=1.)
assert_equal(_get_inst_data(raw), raw._data)
raw.pick_channels(raw.ch_names[:2])
epochs = _segment_raw(raw, 0.5)
assert_equal(_get_inst_data(epochs), epochs._data)
evoked = epochs.average()
assert_equal(_get_inst_data(evoked), evoked.data)
evoked.crop(tmax=0.1)
picks = list(range(2))
freqs = np.array([50., 55.])
n_cycles = 3
tfr = tfr_morlet(evoked, freqs, n_cycles, return_itc=False, picks=picks)
assert_equal(_get_inst_data(tfr), tfr.data)
assert_raises(TypeError, _get_inst_data, 'foo')
def test_misc():
"""Test misc utilities"""
assert_equal(_memory_usage(-1)[0], -1)
assert_equal(_memory_usage((clean_lines, [], {}))[0], -1)
assert_equal(_memory_usage(clean_lines)[0], -1)
assert_raises(ValueError, check_random_state, 'foo')
assert_raises(ValueError, set_memmap_min_size, 1)
assert_raises(ValueError, set_memmap_min_size, 'foo')
assert_raises(TypeError, get_config, 1)
assert_raises(TypeError, set_config, 1)
assert_raises(TypeError, set_config, 'foo', 1)
assert_raises(TypeError, _get_stim_channel, 1, None)
assert_raises(TypeError, _get_stim_channel, [1], None)
assert_raises(TypeError, _check_fname, 1)
assert_raises(IOError, check_fname, 'foo', 'tets-dip.x', (), ('.fif',))
assert_raises(ValueError, _check_subject, None, None)
assert_raises(ValueError, _check_subject, None, 1)
assert_raises(ValueError, _check_subject, 1, None)
@requires_mayavi
def test_check_mayavi():
"""Test mayavi version check"""
assert_raises(RuntimeError, _check_mayavi_version, '100.0.0')
def test_run_tests_if_main():
"""Test run_tests_if_main functionality"""
x = []
def test_a():
x.append(True)
@np.testing.dec.skipif(True)
def test_b():
return
try:
__name__ = '__main__'
run_tests_if_main(measure_mem=False) # dual meas causes problems
def test_c():
raise RuntimeError
try:
__name__ = '__main__'
run_tests_if_main(measure_mem=False) # dual meas causes problems
except RuntimeError:
pass
else:
raise RuntimeError('Error not raised')
finally:
del __name__
assert_true(len(x) == 2)
assert_true(x[0] and x[1])
def test_hash():
"""Test dictionary hashing and comparison functions"""
# does hashing all of these types work:
# {dict, list, tuple, ndarray, str, float, int, None}
d0 = dict(a=dict(a=0.1, b='fo', c=1), b=[1, 'b'], c=(), d=np.ones(3),
e=None)
d0[1] = None
d0[2.] = b'123'
d1 = deepcopy(d0)
assert_true(len(object_diff(d0, d1)) == 0)
assert_true(len(object_diff(d1, d0)) == 0)
assert_equal(object_hash(d0), object_hash(d1))
# change values slightly
d1['data'] = np.ones(3, int)
d1['d'][0] = 0
assert_not_equal(object_hash(d0), object_hash(d1))
d1 = deepcopy(d0)
assert_equal(object_hash(d0), object_hash(d1))
d1['a']['a'] = 0.11
assert_true(len(object_diff(d0, d1)) > 0)
assert_true(len(object_diff(d1, d0)) > 0)
assert_not_equal(object_hash(d0), object_hash(d1))
d1 = deepcopy(d0)
assert_equal(object_hash(d0), object_hash(d1))
d1['a']['d'] = 0 # non-existent key
assert_true(len(object_diff(d0, d1)) > 0)
assert_true(len(object_diff(d1, d0)) > 0)
assert_not_equal(object_hash(d0), object_hash(d1))
d1 = deepcopy(d0)
assert_equal(object_hash(d0), object_hash(d1))
d1['b'].append(0) # different-length lists
assert_true(len(object_diff(d0, d1)) > 0)
assert_true(len(object_diff(d1, d0)) > 0)
assert_not_equal(object_hash(d0), object_hash(d1))
d1 = deepcopy(d0)
assert_equal(object_hash(d0), object_hash(d1))
d1['e'] = 'foo' # non-None
assert_true(len(object_diff(d0, d1)) > 0)
assert_true(len(object_diff(d1, d0)) > 0)
assert_not_equal(object_hash(d0), object_hash(d1))
d1 = deepcopy(d0)
d2 = deepcopy(d0)
d1['e'] = StringIO()
d2['e'] = StringIO()
d2['e'].write('foo')
assert_true(len(object_diff(d0, d1)) > 0)
assert_true(len(object_diff(d1, d0)) > 0)
d1 = deepcopy(d0)
d1[1] = 2
assert_true(len(object_diff(d0, d1)) > 0)
assert_true(len(object_diff(d1, d0)) > 0)
assert_not_equal(object_hash(d0), object_hash(d1))
# generators (and other types) not supported
d1 = deepcopy(d0)
d2 = deepcopy(d0)
d1[1] = (x for x in d0)
d2[1] = (x for x in d0)
assert_raises(RuntimeError, object_diff, d1, d2)
assert_raises(RuntimeError, object_hash, d1)
x = sparse.eye(2, 2, format='csc')
y = sparse.eye(2, 2, format='csr')
assert_true('type mismatch' in object_diff(x, y))
y = sparse.eye(2, 2, format='csc')
assert_equal(len(object_diff(x, y)), 0)
y[1, 1] = 2
assert_true('elements' in object_diff(x, y))
y = sparse.eye(3, 3, format='csc')
assert_true('shape' in object_diff(x, y))
y = 0
assert_true('type mismatch' in object_diff(x, y))
def test_md5sum():
"""Test md5sum calculation
"""
tempdir = _TempDir()
fname1 = op.join(tempdir, 'foo')
fname2 = op.join(tempdir, 'bar')
with open(fname1, 'wb') as fid:
fid.write(b'abcd')
with open(fname2, 'wb') as fid:
fid.write(b'efgh')
assert_equal(md5sum(fname1), md5sum(fname1, 1))
assert_equal(md5sum(fname2), md5sum(fname2, 1024))
assert_true(md5sum(fname1) != md5sum(fname2))
def test_tempdir():
"""Test TempDir
"""
tempdir2 = _TempDir()
assert_true(op.isdir(tempdir2))
x = str(tempdir2)
del tempdir2
assert_true(not op.isdir(x))
def test_estimate_rank():
"""Test rank estimation
"""
data = np.eye(10)
assert_array_equal(estimate_rank(data, return_singular=True)[1],
np.ones(10))
data[0, 0] = 0
assert_equal(estimate_rank(data), 9)
assert_raises(ValueError, estimate_rank, data, 'foo')
def test_logging():
"""Test logging (to file)
"""
assert_raises(ValueError, set_log_level, 'foo')
tempdir = _TempDir()
test_name = op.join(tempdir, 'test.log')
with open(fname_log, 'r') as old_log_file:
old_lines = clean_lines(old_log_file.readlines())
with open(fname_log_2, 'r') as old_log_file_2:
old_lines_2 = clean_lines(old_log_file_2.readlines())
if op.isfile(test_name):
os.remove(test_name)
# test it one way (printing default off)
set_log_file(test_name)
set_log_level('WARNING')
# should NOT print
evoked = read_evokeds(fname_evoked, condition=1)
with open(test_name) as fid:
assert_true(fid.readlines() == [])
# should NOT print
evoked = read_evokeds(fname_evoked, condition=1, verbose=False)
with open(test_name) as fid:
assert_true(fid.readlines() == [])
# should NOT print
evoked = read_evokeds(fname_evoked, condition=1, verbose='WARNING')
with open(test_name) as fid:
assert_true(fid.readlines() == [])
# SHOULD print
evoked = read_evokeds(fname_evoked, condition=1, verbose=True)
with open(test_name, 'r') as new_log_file:
new_lines = clean_lines(new_log_file.readlines())
assert_equal(new_lines, old_lines)
set_log_file(None) # Need to do this to close the old file
os.remove(test_name)
# now go the other way (printing default on)
set_log_file(test_name)
set_log_level('INFO')
# should NOT print
evoked = read_evokeds(fname_evoked, condition=1, verbose='WARNING')
with open(test_name) as fid:
assert_true(fid.readlines() == [])
# should NOT print
evoked = read_evokeds(fname_evoked, condition=1, verbose=False)
with open(test_name) as fid:
assert_true(fid.readlines() == [])
# SHOULD print
evoked = read_evokeds(fname_evoked, condition=1)
with open(test_name, 'r') as new_log_file:
new_lines = clean_lines(new_log_file.readlines())
assert_equal(new_lines, old_lines)
# check to make sure appending works (and as default, raises a warning)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
set_log_file(test_name, overwrite=False)
assert_equal(len(w), 0)
set_log_file(test_name)
assert_equal(len(w), 1)
assert_true('test_utils.py' in w[0].filename)
evoked = read_evokeds(fname_evoked, condition=1)
with open(test_name, 'r') as new_log_file:
new_lines = clean_lines(new_log_file.readlines())
assert_equal(new_lines, old_lines_2)
# make sure overwriting works
set_log_file(test_name, overwrite=True)
# this line needs to be called to actually do some logging
evoked = read_evokeds(fname_evoked, condition=1)
del evoked
with open(test_name, 'r') as new_log_file:
new_lines = clean_lines(new_log_file.readlines())
assert_equal(new_lines, old_lines)
def test_config():
"""Test mne-python config file support"""
tempdir = _TempDir()
key = '_MNE_PYTHON_CONFIG_TESTING'
value = '123456'
old_val = os.getenv(key, None)
os.environ[key] = value
assert_true(get_config(key) == value)
del os.environ[key]
# catch the warning about it being a non-standard config key
assert_true(len(set_config(None, None)) > 10) # tuple of valid keys
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
set_config(key, None, home_dir=tempdir, set_env=False)
assert_true(len(w) == 1)
assert_true(get_config(key, home_dir=tempdir) is None)
assert_raises(KeyError, get_config, key, raise_error=True)
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
assert_true(key not in os.environ)
set_config(key, value, home_dir=tempdir, set_env=True)
assert_true(key in os.environ)
assert_true(get_config(key, home_dir=tempdir) == value)
set_config(key, None, home_dir=tempdir, set_env=True)
assert_true(key not in os.environ)
set_config(key, None, home_dir=tempdir, set_env=True)
assert_true(key not in os.environ)
if old_val is not None:
os.environ[key] = old_val
# Check if get_config with no input returns all config
key = 'MNE_PYTHON_TESTING_KEY'
config = {key: value}
with warnings.catch_warnings(record=True): # non-standard key
warnings.simplefilter('always')
set_config(key, value, home_dir=tempdir)
assert_equal(get_config(home_dir=tempdir), config)
# Check what happens when we use a corrupted file
json_fname = get_config_path(home_dir=tempdir)
with open(json_fname, 'w') as fid:
fid.write('foo{}')
with warnings.catch_warnings(record=True) as w:
assert_equal(get_config(home_dir=tempdir), dict())
assert_true(any('not a valid JSON' in str(ww.message) for ww in w))
with warnings.catch_warnings(record=True) as w: # non-standard key
assert_raises(RuntimeError, set_config, key, 'true', home_dir=tempdir)
@testing.requires_testing_data
def test_show_fiff():
"""Test show_fiff
"""
# this is not exhaustive, but hopefully bugs will be found in use
info = show_fiff(fname_evoked)
keys = ['FIFF_EPOCH', 'FIFFB_HPI_COIL', 'FIFFB_PROJ_ITEM',
'FIFFB_PROCESSED_DATA', 'FIFFB_EVOKED', 'FIFF_NAVE',
'FIFF_EPOCH']
assert_true(all(key in info for key in keys))
info = show_fiff(fname_raw, read_limit=1024)
assert_true('COORD_TRANS' in show_fiff(fname_fsaverage_trans))
@deprecated('message')
def deprecated_func():
pass
@deprecated('message')
class deprecated_class(object):
def __init__(self):
pass
def test_deprecated():
"""Test deprecated function
"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
deprecated_func()
assert_true(len(w) == 1)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
deprecated_class()
assert_true(len(w) == 1)
def _test_fetch(url):
"""Helper to test URL retrieval"""
tempdir = _TempDir()
with ArgvSetter(disable_stderr=False): # to capture stdout
archive_name = op.join(tempdir, "download_test")
_fetch_file(url, archive_name, timeout=30., verbose=False,
resume=False)
assert_raises(Exception, _fetch_file, 'NOT_AN_ADDRESS',
op.join(tempdir, 'test'), verbose=False)
resume_name = op.join(tempdir, "download_resume")
# touch file
with open(resume_name + '.part', 'w'):
os.utime(resume_name + '.part', None)
_fetch_file(url, resume_name, resume=True, timeout=30.,
verbose=False)
assert_raises(ValueError, _fetch_file, url, archive_name,
hash_='a', verbose=False)
assert_raises(RuntimeError, _fetch_file, url, archive_name,
hash_='a' * 32, verbose=False)
@requires_good_network
def test_fetch_file_html():
"""Test file downloading over http"""
_test_fetch('http://google.com')
@requires_ftp
@requires_good_network
def test_fetch_file_ftp():
"""Test file downloading over ftp"""
_test_fetch('ftp://speedtest.tele2.net/1KB.zip')
def test_sum_squared():
"""Test optimized sum of squares
"""
X = np.random.RandomState(0).randint(0, 50, (3, 3))
assert_equal(np.sum(X ** 2), sum_squared(X))
def test_sizeof_fmt():
"""Test sizeof_fmt
"""
assert_equal(sizeof_fmt(0), '0 bytes')
assert_equal(sizeof_fmt(1), '1 byte')
assert_equal(sizeof_fmt(1000), '1000 bytes')
def test_url_to_local_path():
"""Test URL to local path
"""
assert_equal(_url_to_local_path('http://google.com/home/why.html', '.'),
op.join('.', 'home', 'why.html'))
def test_check_type_picks():
"""Test checking type integrity checks of picks
"""
picks = np.arange(12)
assert_array_equal(picks, _check_type_picks(picks))
picks = list(range(12))
assert_array_equal(np.array(picks), _check_type_picks(picks))
picks = None
assert_array_equal(None, _check_type_picks(picks))
picks = ['a', 'b']
assert_raises(ValueError, _check_type_picks, picks)
picks = 'b'
assert_raises(ValueError, _check_type_picks, picks)
def test_compute_corr():
"""Test Anscombe's Quartett
"""
x = np.array([10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5])
y = np.array([[8.04, 6.95, 7.58, 8.81, 8.33, 9.96,
7.24, 4.26, 10.84, 4.82, 5.68],
[9.14, 8.14, 8.74, 8.77, 9.26, 8.10,
6.13, 3.10, 9.13, 7.26, 4.74],
[7.46, 6.77, 12.74, 7.11, 7.81, 8.84,
6.08, 5.39, 8.15, 6.42, 5.73],
[8, 8, 8, 8, 8, 8, 8, 19, 8, 8, 8],
[6.58, 5.76, 7.71, 8.84, 8.47, 7.04,
5.25, 12.50, 5.56, 7.91, 6.89]])
r = compute_corr(x, y.T)
r2 = np.array([np.corrcoef(x, y[i])[0, 1]
for i in range(len(y))])
assert_allclose(r, r2)
assert_raises(ValueError, compute_corr, [1, 2], [])
def test_create_slices():
"""Test checking the create of time create_slices
"""
# Test that create_slices default provide an empty list
assert_true(create_slices(0, 0) == [])
# Test that create_slice return correct number of slices
assert_true(len(create_slices(0, 100)) == 100)
# Test with non-zero start parameters
assert_true(len(create_slices(50, 100)) == 50)
    # Test slices' length with window length=2
assert_true(len(create_slices(0, 100, length=2)) == 50)
# Test slices' length with manual slice separation
assert_true(len(create_slices(0, 100, step=10)) == 10)
    # Test the number of slices for overlapping windows (length=50, step=10)
assert_true(len(create_slices(0, 500, length=50, step=10)) == 46)
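    # Worked check (illustrative): windows of length 50 starting every 10
    # samples within [0, 500) begin at 0, 10, ..., 450, so there are
    # (500 - 50) // 10 + 1 == 46 slices.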
    # Test that slice elements have the correct start, stop and step
slices = create_slices(0, 10)
assert_true(slices[0].start == 0)
assert_true(slices[0].step == 1)
assert_true(slices[0].stop == 1)
assert_true(slices[-1].stop == 10)
# Same with larger window width
slices = create_slices(0, 9, length=3)
assert_true(slices[0].start == 0)
assert_true(slices[0].step == 1)
assert_true(slices[0].stop == 3)
assert_true(slices[-1].stop == 9)
# Same with manual slices' separation
slices = create_slices(0, 9, length=3, step=1)
assert_true(len(slices) == 7)
assert_true(slices[0].step == 1)
assert_true(slices[0].stop == 3)
assert_true(slices[-1].start == 6)
assert_true(slices[-1].stop == 9)
def test_time_mask():
"""Test safe time masking
"""
N = 10
x = np.arange(N).astype(float)
assert_equal(_time_mask(x, 0, N - 1).sum(), N)
assert_equal(_time_mask(x - 1e-10, 0, N - 1, sfreq=1000.).sum(), N)
assert_equal(_time_mask(x - 1e-10, None, N - 1, sfreq=1000.).sum(), N)
assert_equal(_time_mask(x - 1e-10, None, None, sfreq=1000.).sum(), N)
assert_equal(_time_mask(x - 1e-10, -np.inf, None, sfreq=1000.).sum(), N)
assert_equal(_time_mask(x - 1e-10, None, np.inf, sfreq=1000.).sum(), N)
# non-uniformly spaced inputs
x = np.array([4, 10])
assert_equal(_time_mask(x[:1], tmin=10, sfreq=1,
raise_error=False).sum(), 0)
assert_equal(_time_mask(x[:1], tmin=11, tmax=12, sfreq=1,
raise_error=False).sum(), 0)
assert_equal(_time_mask(x, tmin=10, sfreq=1).sum(), 1)
assert_equal(_time_mask(x, tmin=6, sfreq=1).sum(), 1)
assert_equal(_time_mask(x, tmin=5, sfreq=1).sum(), 1)
assert_equal(_time_mask(x, tmin=4.5001, sfreq=1).sum(), 1)
assert_equal(_time_mask(x, tmin=4.4999, sfreq=1).sum(), 2)
assert_equal(_time_mask(x, tmin=4, sfreq=1).sum(), 2)
# degenerate cases
assert_raises(ValueError, _time_mask, x[:1], tmin=11, tmax=12)
assert_raises(ValueError, _time_mask, x[:1], tmin=10, sfreq=1)
def test_random_permutation():
"""Test random permutation function
"""
n_samples = 10
random_state = 42
python_randperm = random_permutation(n_samples, random_state)
# matlab output when we execute rng(42), randperm(10)
matlab_randperm = np.array([7, 6, 5, 1, 4, 9, 10, 3, 8, 2])
assert_array_equal(python_randperm, matlab_randperm - 1)
def test_copy_doc():
'''Test decorator for copying docstrings'''
class A:
def m1():
"""Docstring for m1"""
pass
class B:
def m1():
pass
class C (A):
@copy_doc(A.m1)
def m1():
pass
assert_equal(C.m1.__doc__, 'Docstring for m1')
assert_raises(ValueError, copy_doc(B.m1), C.m1)
def test_copy_function_doc_to_method_doc():
'''Test decorator for re-using function docstring as method docstrings'''
def f1(object, a, b, c):
"""Docstring for f1
Parameters
----------
object : object
Some object. This description also has
blank lines in it.
a : int
Parameter a
b : int
Parameter b
"""
pass
def f2(object):
"""Docstring for f2
Parameters
----------
object : object
Only one parameter
Returns
-------
nothing.
"""
pass
def f3(object):
"""Docstring for f3
Parameters
----------
object : object
Only one parameter
"""
pass
def f4(object):
"""Docstring for f4"""
pass
def f5(object):
"""Docstring for f5
Parameters
----------
Returns
-------
nothing.
"""
pass
class A:
@copy_function_doc_to_method_doc(f1)
def method_f1(self, a, b, c):
pass
@copy_function_doc_to_method_doc(f2)
def method_f2(self):
"method_f3 own docstring"
pass
@copy_function_doc_to_method_doc(f3)
def method_f3(self):
pass
assert_equal(
A.method_f1.__doc__,
"""Docstring for f1
Parameters
----------
a : int
Parameter a
b : int
Parameter b
"""
)
assert_equal(
A.method_f2.__doc__,
"""Docstring for f2
Returns
-------
nothing.
method_f3 own docstring"""
)
assert_equal(A.method_f3.__doc__, 'Docstring for f3\n\n ')
assert_raises(ValueError, copy_function_doc_to_method_doc(f4), A.method_f1)
assert_raises(ValueError, copy_function_doc_to_method_doc(f5), A.method_f1)
run_tests_if_main()
| jniediek/mne-python | mne/tests/test_utils.py | Python | bsd-3-clause | 25,120 | ["Mayavi"] | 6d9a612a6ff395ee6699f793297d998bf29e39d6dfb4e285fae20e2bbbdace22 |
import os
import re
import socket
import sys
import time
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django_extensions.management.utils import setup_logger, RedirectHandler
from django_extensions.management.technical_response import null_technical_500_response
try:
if 'django.contrib.staticfiles' in settings.INSTALLED_APPS:
from django.contrib.staticfiles.handlers import StaticFilesHandler
USE_STATICFILES = True
elif 'staticfiles' in settings.INSTALLED_APPS:
from staticfiles.handlers import StaticFilesHandler # noqa
USE_STATICFILES = True
else:
USE_STATICFILES = False
except ImportError:
USE_STATICFILES = False
naiveip_re = re.compile(r"""^(?:
(?P<addr>
(?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address
(?P<ipv6>\[[a-fA-F0-9:]+\]) | # IPv6 address
(?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)
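# Example strings accepted by naiveip_re (illustrative, not from the source):
#   "8000"            -> port only
#   "127.0.0.1:8000"  -> IPv4 address and port
#   "[::1]:8000"      -> IPv6 address and port
#   "localhost:8000"  -> FQDN and port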
DEFAULT_PORT = "8000"
import logging
logger = logging.getLogger(__name__)
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
help='Tells Django to use a IPv6 address.'),
make_option('--noreload', action='store_false', dest='use_reloader', default=True,
help='Tells Django to NOT use the auto-reloader.'),
make_option('--browser', action='store_true', dest='open_browser',
help='Tells Django to open a browser.'),
make_option('--adminmedia', dest='admin_media_path', default='',
help='Specifies the directory from which to serve admin media.'),
make_option('--threaded', action='store_true', dest='threaded',
help='Run in multithreaded mode.'),
make_option('--output', dest='output_file', default=None,
help='Specifies an output file to send a copy of all messages (not flushed immediately).'),
make_option('--print-sql', action='store_true', default=False,
help="Print SQL queries as they're executed"),
make_option('--cert', dest='cert_path', action="store", type="string",
help='To use SSL, specify certificate path.'),
)
if USE_STATICFILES:
option_list += (
make_option('--nostatic', action="store_false", dest='use_static_handler', default=True,
help='Tells Django to NOT automatically serve static files at STATIC_URL.'),
make_option('--insecure', action="store_true", dest='insecure_serving', default=False,
help='Allows serving static files even if DEBUG is False.'),
)
help = "Starts a lightweight Web server for development."
args = '[optional port number, or ipaddr:port]'
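    # Example invocations (illustrative, not from the source):
    #   ./manage.py runserver_plus
    #   ./manage.py runserver_plus 0.0.0.0:8000 --threaded --print-sql
    #   ./manage.py runserver_plus 8000 --cert /tmp/dev.crt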
# Validation is called explicitly each time the server is reloaded.
requires_model_validation = False
def handle(self, addrport='', *args, **options):
import django
setup_logger(logger, self.stderr, filename=options.get('output_file', None)) # , fmt="[%(name)s] %(message)s")
logredirect = RedirectHandler(__name__)
# Redirect werkzeug log items
werklogger = logging.getLogger('werkzeug')
werklogger.setLevel(logging.INFO)
werklogger.addHandler(logredirect)
werklogger.propagate = False
if options.get("print_sql", False):
from django.db.backends import util
try:
import sqlparse
except ImportError:
sqlparse = None # noqa
class PrintQueryWrapper(util.CursorDebugWrapper):
def execute(self, sql, params=()):
starttime = time.time()
try:
return self.cursor.execute(sql, params)
finally:
raw_sql = self.db.ops.last_executed_query(self.cursor, sql, params)
execution_time = time.time() - starttime
therest = ' -- [Execution time: %.6fs] [Database: %s]' % (execution_time, self.db.alias)
if sqlparse:
logger.info(sqlparse.format(raw_sql, reindent=True) + therest)
else:
logger.info(raw_sql + therest)
util.CursorDebugWrapper = PrintQueryWrapper
try:
from django.core.servers.basehttp import AdminMediaHandler
USE_ADMINMEDIAHANDLER = True
except ImportError:
USE_ADMINMEDIAHANDLER = False
try:
from django.core.servers.basehttp import get_internal_wsgi_application as WSGIHandler
except ImportError:
from django.core.handlers.wsgi import WSGIHandler # noqa
try:
from werkzeug import run_simple, DebuggedApplication
# Set colored output
if settings.DEBUG:
try:
set_werkzeug_log_color()
                except Exception:  # dealing with werkzeug internals; anything could go wrong
                    print("Wrapping internal werkzeug logger for color highlighting has failed!")
except ImportError:
raise CommandError("Werkzeug is required to use runserver_plus. Please visit http://werkzeug.pocoo.org/ or install via pip. (pip install Werkzeug)")
# usurp django's handler
from django.views import debug
debug.technical_500_response = null_technical_500_response
self.use_ipv6 = options.get('use_ipv6')
if self.use_ipv6 and not socket.has_ipv6:
raise CommandError('Your Python does not support IPv6.')
self._raw_ipv6 = False
if not addrport:
try:
addrport = settings.RUNSERVERPLUS_SERVER_ADDRESS_PORT
except AttributeError:
pass
if not addrport:
self.addr = ''
self.port = DEFAULT_PORT
else:
m = re.match(naiveip_re, addrport)
if m is None:
raise CommandError('"%s" is not a valid port number '
'or address:port pair.' % addrport)
self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
if not self.port.isdigit():
raise CommandError("%r is not a valid port number." %
self.port)
if self.addr:
if _ipv6:
self.addr = self.addr[1:-1]
self.use_ipv6 = True
self._raw_ipv6 = True
elif self.use_ipv6 and not _fqdn:
raise CommandError('"%s" is not a valid IPv6 address.'
% self.addr)
if not self.addr:
self.addr = '::1' if self.use_ipv6 else '127.0.0.1'
threaded = options.get('threaded', False)
use_reloader = options.get('use_reloader', True)
open_browser = options.get('open_browser', False)
cert_path = options.get("cert_path")
        quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
bind_url = "http://%s:%s/" % (
self.addr if not self._raw_ipv6 else '[%s]' % self.addr, self.port)
def inner_run():
print("Validating models...")
self.validate(display_num_errors=True)
print("\nDjango version %s, using settings %r" % (django.get_version(), settings.SETTINGS_MODULE))
print("Development server is running at %s" % (bind_url,))
print("Using the Werkzeug debugger (http://werkzeug.pocoo.org/)")
print("Quit the server with %s." % quit_command)
path = options.get('admin_media_path', '')
if not path:
admin_media_path = os.path.join(django.__path__[0], 'contrib/admin/static/admin')
if os.path.isdir(admin_media_path):
path = admin_media_path
else:
path = os.path.join(django.__path__[0], 'contrib/admin/media')
handler = WSGIHandler()
if USE_ADMINMEDIAHANDLER:
handler = AdminMediaHandler(handler, path)
if USE_STATICFILES:
use_static_handler = options.get('use_static_handler', True)
insecure_serving = options.get('insecure_serving', False)
if use_static_handler and (settings.DEBUG or insecure_serving):
handler = StaticFilesHandler(handler)
if open_browser:
import webbrowser
webbrowser.open(bind_url)
if cert_path:
"""
OpenSSL is needed for SSL support.
                This will make flake8 throw a warning since OpenSSL is not used
                directly; alas, this is the only way to show meaningful error
                messages. See:
http://lucumr.pocoo.org/2011/9/21/python-import-blackbox/
for more information on python imports.
"""
try:
import OpenSSL # NOQA
except ImportError:
raise CommandError("Python OpenSSL Library is "
"required to use runserver_plus with ssl support. "
"Install via pip (pip install pyOpenSSL).")
dir_path, cert_file = os.path.split(cert_path)
if not dir_path:
dir_path = os.getcwd()
root, ext = os.path.splitext(cert_file)
certfile = os.path.join(dir_path, root + ".crt")
keyfile = os.path.join(dir_path, root + ".key")
try:
from werkzeug.serving import make_ssl_devcert
if os.path.exists(certfile) and \
os.path.exists(keyfile):
ssl_context = (certfile, keyfile)
else: # Create cert, key files ourselves.
ssl_context = make_ssl_devcert(
os.path.join(dir_path, root), host='localhost')
except ImportError:
print("Werkzeug version is less than 0.9, trying adhoc certificate.")
ssl_context = "adhoc"
else:
ssl_context = None
run_simple(
self.addr,
int(self.port),
DebuggedApplication(handler, True),
use_reloader=use_reloader,
use_debugger=True,
threaded=threaded,
ssl_context=ssl_context
)
inner_run()
def set_werkzeug_log_color():
"""Try to set color to the werkzeug log.
"""
from django.core.management.color import color_style
from werkzeug.serving import WSGIRequestHandler
from werkzeug._internal import _log
_style = color_style()
_orig_log = WSGIRequestHandler.log
def werk_log(self, type, message, *args):
try:
msg = '%s - - [%s] %s' % (
self.address_string(),
self.log_date_time_string(),
message % args,
)
http_code = str(args[1])
        except Exception:
return _orig_log(type, message, *args)
# Utilize terminal colors, if available
if http_code[0] == '2':
# Put 2XX first, since it should be the common case
msg = _style.HTTP_SUCCESS(msg)
elif http_code[0] == '1':
msg = _style.HTTP_INFO(msg)
elif http_code == '304':
msg = _style.HTTP_NOT_MODIFIED(msg)
elif http_code[0] == '3':
msg = _style.HTTP_REDIRECT(msg)
elif http_code == '404':
msg = _style.HTTP_NOT_FOUND(msg)
elif http_code[0] == '4':
msg = _style.HTTP_BAD_REQUEST(msg)
else:
# Any 5XX, or any other response
msg = _style.HTTP_SERVER_ERROR(msg)
_log(type, msg)
WSGIRequestHandler.log = werk_log
| marcoantoniooliveira/labweb | oscar/lib/python2.7/site-packages/django_extensions/management/commands/runserver_plus.py | Python | bsd-3-clause | 12,343 | ["VisIt"] | 62ba47939a99170e7c89f48adfd69413585dafcf4f6482e8b08392455eb6b43d |
|
try:
import moogli
except ImportError:
    print("[INFO] Could not import moogli. Quitting...")
quit()
import moose
from moose import neuroml
from PyQt4 import Qt, QtCore, QtGui
import sys
import os
import random
import numpy as np
app = QtGui.QApplication(sys.argv)
filename = os.path.join( os.path.split(os.path.realpath(__file__))[0]
, "../neuroml/PurkinjeCellPassivePulseInput/PurkinjePassive.net.xml"
)
moose.neuroml.loadNeuroML_L123(filename)
morphology = moogli.read_morphology_from_moose(name = "", path = "/cells[0]")
morphology.create_group( "group-1"
, [ "/cells[0]/BigCellCML_0[0]/Seg0_dend_1043_1044[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1033_1034[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1019_1020[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1018_1019[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1016_1017[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1539_1540[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1579_1580[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1573_1574[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1572_1573[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1569_1570[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1559_1560[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1045_1046[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1021_1022[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1020_1021[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1581_1582[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1580_1581[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1046_1047[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1022_1023[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1027_1028[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1023_1024[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1028_1029[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1025_1026[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1024_1025[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1026_1027[0]"
]
, 10.0
, 200.0
, [1.0, 0.0, 0.0, 1.0]
, [0.0, 1.0, 0.0, 1.0]
)
morphology.create_group( "group-2"
, [ "/cells[0]/BigCellCML_0[0]/Seg0_dend_1076_1077[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1072_1073[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1099_1100[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1096_1097[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1108_1109[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1135_1136[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_35_36[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_655_656[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_535_536[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_716_717[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_14_15[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_87_88[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_236_237[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_218_219[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_172_173[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_152_153[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_363_364[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_362_363[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_344_345[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_341_342[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_320_321[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_312_313[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_453_454[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_449_450[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_437_438[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_426_427[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_418_419[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_409_410[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_407_408[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_396_397[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_394_395[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_390_391[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_389_390[0]"
, "/cells[0]/BigCellCML_0[0]/Seg0_dend_1079_1080[0]"
]
, 0.0
, 1.0
, [0.0, 1.0, 0.0, 1.0]
, [0.0, 0.0, 1.0, 1.0]
)
def callback(morphology, viewer):
morphology.set_color( "group-1"
, np.random.random_sample((24,)) * (100.0 - 20.0) + 20.0
)
morphology.set_color( "group-2"
, np.random.random_sample((34,))
)
viewer.roll(0.05, 0)
viewer.pitch(0.05, 1)
viewer.yaw(0.05, 2)
return True
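# Note (assumption, not stated in the source): the scalar arrays passed to
# set_color above appear to fall inside each group's value range given to
# create_group (10.0-200.0 for group-1, 0.0-1.0 for group-2), which would let
# moogli interpolate between the two RGBA colors supplied for each group.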
viewer = moogli.DynamicMorphologyViewerWidget(morphology)
viewer.showMaximized()
viewer.split_horizontally(0)
viewer.split_vertically(1)
viewer.zoom(0.5, 0)
viewer.zoom(0.5, 1)
viewer.zoom(0.5, 2)
viewer.set_callback(callback)
app.exec_()
| BhallaLab/moose | moose-examples/moogli/color_update.py | Python | gpl-3.0 | 6,159 | ["MOOSE"] | e8e9d070529bb813c6b1340e71d5dcf129e99d58a48bee1c7ce76dd59723f389 |
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Module containing class to create an ion
"""
__author__ = "Sai Jayaraman"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.0"
__maintainer__ = "Sai Jayaraman"
__email__ = "sjayaram@mit.edu"
__status__ = "Production"
__date__ = "Dec 10, 2012"
import re
import numpy as np
from pymatgen.core.composition import Composition
from monty.json import MSONable
from pymatgen.util.string import formula_double_format
class Ion(Composition, MSONable):
"""
Basic ion object. It is just a Composition object with an additional
variable to store charge.
The net charge can either be represented as Mn++, or Mn+2, or Mn[2+].
Note the order of the sign and magnitude in each representation.
"""
def __init__(self, composition, charge=0.0, properties=None):
"""
Flexible Ion construction, similar to Composition.
For more information, please see pymatgen.core.Composition
"""
super(Ion, self).__init__(composition)
self._charge = charge
@classmethod
def from_formula(cls, formula):
charge = 0.0
f = formula
m = re.search(r"\[([^\[\]]+)\]", f)
if m:
m_chg = re.search(r"([\.\d]*)([+-])", m.group(1))
if m_chg:
if m_chg.group(1) != "":
charge += float(m_chg.group(1)) * \
(float(m_chg.group(2) + "1"))
else:
charge += float(m_chg.group(2) + "1")
f = f.replace(m.group(), "", 1)
m = re.search(r"\(aq\)", f)
if m:
f = f.replace(m.group(), "", 1)
for m_chg in re.finditer(r"([+-])([\.\d]*)", f):
sign = m_chg.group(1)
sgn = float(str(sign + "1"))
if m_chg.group(2).strip() != "":
charge += float(m_chg.group(2)) * sgn
else:
charge += sgn
f = f.replace(m_chg.group(), "", 1)
composition = Composition(f)
return cls(composition, charge)
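    # Illustrative parses (assumed behavior, not from the source):
    #   Ion.from_formula("Mn[2+]") -> charge +2 (bracket notation)
    #   Ion.from_formula("Mn++")   -> charge +2 (trailing-sign loop)
    #   Ion.from_formula("SO4-2")  -> charge -2 (sign then magnitude)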
@property
def formula(self):
"""
Returns a formula string, with elements sorted by electronegativity,
e.g., Li4 Fe4 P4 O16.
"""
formula = super(Ion, self).formula
chg_str = ""
if self.charge > 0:
chg_str = " +" + formula_double_format(self.charge, False)
        elif self.charge < 0:
chg_str = " " + formula_double_format(self.charge, False)
return formula + chg_str
@property
def anonymized_formula(self):
"""
An anonymized formula. Appends charge to the end
of anonymized composition
"""
anon_formula = super(Ion, self).anonymized_formula
chg = self._charge
chg_str = ""
if chg > 0:
chg_str += ("{}{}".format('+', str(int(chg))))
elif chg < 0:
chg_str += ("{}{}".format('-', str(int(np.abs(chg)))))
return anon_formula + chg_str
@property
def reduced_formula(self):
"""
Returns a reduced formula string with appended charge.
"""
reduced_formula = super(Ion, self).reduced_formula
charge = self._charge / self.get_reduced_composition_and_factor()[1]
if charge > 0:
if abs(charge) == 1:
chg_str = "[+]"
else:
chg_str = "[" + formula_double_format(charge, False) + "+]"
elif charge < 0:
if abs(charge) == 1:
chg_str = "[-]"
else:
chg_str = "[{}-]".format(formula_double_format(abs(charge),
False))
else:
chg_str = "(aq)"
return reduced_formula + chg_str
@property
def alphabetical_formula(self):
"""
        Returns an alphabetical formula string with appended charge
"""
alph_formula = super(Ion, self).alphabetical_formula
chg_str = ""
if self.charge > 0:
chg_str = " +" + formula_double_format(self.charge, False)
elif self.charge < 0:
chg_str = " " + formula_double_format(self.charge, False)
return alph_formula + chg_str
@property
def charge(self):
"""
Charge of the ion
"""
return self._charge
def as_dict(self):
"""
Returns:
dict with composition, as well as charge
"""
d = super(Ion, self).as_dict()
d['charge'] = self.charge
return d
@classmethod
def from_dict(cls, d):
"""
Generates an ion object from a dict created by as_dict().
        Args:
            d: {symbol: amount} dict, plus a "charge" key.
"""
charge = d.pop('charge')
composition = Composition(d)
return Ion(composition, charge)
@property
def to_reduced_dict(self):
"""
        Returns:
            dict with element symbol and reduced amount, plus the charge,
            e.g., {"Fe": 2.0, "O": 3.0, "charge": 0.0}.
"""
d = self.composition.to_reduced_dict
d['charge'] = self.charge
return d
@property
def composition(self):
return Composition(self._data)
def __eq__(self, other):
if self.composition != other.composition:
return False
if self.charge != other.charge:
return False
return True
def __add__(self, other):
"""
Addition of two ions.
"""
new_composition = self.composition + other.composition
new_charge = self.charge + other.charge
return Ion(new_composition, new_charge)
def __sub__(self, other):
"""
Subtraction of two ions
"""
new_composition = self.composition - other.composition
new_charge = self.charge - other.charge
return Ion(new_composition, new_charge)
def __mul__(self, other):
"""
Multiplication of an Ion with a factor
"""
new_composition = self.composition * other
new_charge = self.charge * other
return Ion(new_composition, new_charge)
def __hash__(self):
return hash((self.composition, self.charge))
def __str__(self):
return self.formula
def __repr__(self):
return "Ion: " + self.formula
| montoyjh/pymatgen | pymatgen/core/ion.py | Python | mit | 6,452 | ["pymatgen"] | 357bb33ff01c2acf056b0574d567922a32d391738e2250c9f24f63b0d687e982 |
|
import pandas as pd
import xarray as xr
import os as os
homedir = 'D:/UW_PhD/Python/xarray/'
os.chdir(homedir)
#open pnnl lat long el file
pnnlxy = xr.open_dataset('data_LatLonGht.nc')
#range of dates to create new files for
start_date = '2006-11-01'
end_date = '2006-11-20'
dates = [x.strftime('%Y-%m-%d') for x in pd.date_range(start=start_date, end=end_date, freq='D')]
#for each day (file) open pnnl netcdf file as xarray dataset, add coordinate data, save as new netcdf
for ind, ymd in enumerate(dates):
date = ymd
pnnl = xr.open_dataset('data.' + date +'.nc')
#merge pnnl lat long data with climate data
pnnlnew = xr.merge([pnnl, pnnlxy], compat='no_conflicts')
#convert variables LAT, LON and Z to coordinates
pnnlnewc = pnnlnew.set_coords({'LAT','LON','Z'}, inplace=False)
#create series of dates to add to dataset
time = pd.date_range(start=date, periods=24, freq='H')
#add coordinates using series of dates
pnnlnewc.update({'time': ('time', time)})
    #workaround for xarray's Dataset.to_netcdf: serialization fails if a variable's
    #attributes already contain a "coordinates" entry, so rewrite each variable's
    #attributes without it
pnnlnewc.Q2.attrs = ([('stagger', ''),
('units', 'kg kg-1'),
('description', 'QV at 2 M'),
('MemoryOrder', 'XY '),
('FieldType', 104)])
pnnlnewc.PSFC.attrs = ([('stagger', ''),
('units', 'Pa'),
('description', 'SFC PRESSURE'),
('MemoryOrder', 'XY '),
('FieldType', 104)])
pnnlnewc.GLW.attrs = ([('stagger', ''),
('units', 'W m-2'),
('description', 'DOWNWARD LONG WAVE FLUX AT GROUND SURFACE'),
('MemoryOrder', 'XY '),
('FieldType', 104)])
pnnlnewc.SWDOWN.attrs = ([('stagger', ''),
('units', 'W m-2'),
('description', 'DOWNWARD SHORT WAVE FLUX AT GROUND SURFACE'),
('MemoryOrder', 'XY '),
('FieldType', 104)])
pnnlnewc.PREC_ACC_NC.attrs = ([('stagger', ''),
('units', 'mm'),
('description', 'GRID SCALE PRECIPITATION'),
('MemoryOrder', 'XY '),
('FieldType', 104)])
pnnlnewc.SNOW_ACC_NC.attrs = ([('stagger', ''),
('units', 'mm'),
('description', 'SNOW WATER EQUIVALENT'),
('MemoryOrder', 'XY '),
('FieldType', 104)])
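    #sketch of a more compact equivalent of the attribute blocks above
    #(same data; assumes this xarray version accepts dict-valued attrs):
    #for var, (units, desc) in {
    #        'Q2': ('kg kg-1', 'QV at 2 M'),
    #        'PSFC': ('Pa', 'SFC PRESSURE'),
    #        'GLW': ('W m-2', 'DOWNWARD LONG WAVE FLUX AT GROUND SURFACE'),
    #        'SWDOWN': ('W m-2', 'DOWNWARD SHORT WAVE FLUX AT GROUND SURFACE'),
    #        'PREC_ACC_NC': ('mm', 'GRID SCALE PRECIPITATION'),
    #        'SNOW_ACC_NC': ('mm', 'SNOW WATER EQUIVALENT')}.items():
    #    pnnlnewc[var].attrs = {'stagger': '', 'units': units,
    #                           'description': desc, 'MemoryOrder': 'XY ',
    #                           'FieldType': 104}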
#save new netcdf file
pnnlnewc.to_netcdf(date + '_coord.nc')
print(str(ind) + ' : ' + date)
| ChristinaB/Observatory | tutorials/CreatePNNLnetcdfFile_MultipleFiles.py | Python | mit | 3,103 | ["NetCDF"] | f78ec9df92bf14cdbed3497069b2977e977c5a49e166c1df46dc2ab9a66d16b5 |
|
from rdkit import DataStructs
from rdkit import RDConfig
from rdkit import Chem
from rdkit.Chem import ChemicalFeatures,rdDistGeom
from rdkit import Geometry
import unittest,os
def lstFeq(l1, l2, tol=1.e-4):
  l1, l2 = list(l1), list(l2)
  if len(l1) != len(l2):
    return 0
  for v1, v2 in zip(l1, l2):
    if not feq(v1, v2, tol):
      return 0
  return 1
def feq(v1,v2,tol2=1e-4):
return abs(v1-v2)<=tol2
class TestCase(unittest.TestCase):
def setUp(self):
pass
def testBasic(self):
cfac = ChemicalFeatures.BuildFeatureFactory(os.path.join(RDConfig.RDBaseDir,
'Code','GraphMol','MolChemicalFeatures','test_data','featDef.txt'))
self.failUnless(cfac.GetNumFeatureDefs() == 2)
fNames = cfac.GetFeatureFamilies()
self.failUnless(len(fNames) == 2)
self.failUnless(fNames[0] == 'HBondDonor')
self.failUnless(fNames[1] == 'HBondAcceptor')
mol = Chem.MolFromSmiles("COCN")
rdDistGeom.EmbedMolecule(mol, 30, 100)
self.failUnless(cfac.GetNumMolFeatures(mol) == 3)
for i in range(cfac.GetNumMolFeatures(mol)):
self.failUnless(cfac.GetMolFeature(mol,i))
# check that the recompute argument works:
self.failUnless(cfac.GetMolFeature(mol,0))
for i in range(cfac.GetNumMolFeatures(mol)):
self.failUnless(cfac.GetMolFeature(mol,i,"",False))
self.failUnlessRaises(IndexError,lambda : cfac.GetMolFeature(mol,3))
feats = cfac.GetFeaturesForMol(mol)
self.failUnless(len(feats) == 3)
fTypes = ['HBondDonor', 'HBondAcceptor', 'HBondAcceptor']
positions = [[1.3215, -0.6284, 0.0967],
[-0.7136, 0.6241, 0.1862],
[1.3215, -0.6284, 0.0967]]
targetAids = [[3], [1], [3]]
i = 0
for feat in feats:
self.failUnless(feat.GetFamily() == fTypes[i])
pos = list(feat.GetPos())
aids = list(feat.GetAtomIds())
self.failUnless(aids == targetAids[i])
self.failUnless(lstFeq(pos, positions[i]))
nmol = feat.GetMol()
self.failUnless(Chem.MolToSmiles(nmol) == "COCN")
ncfac = feat.GetFactory()
self.failUnless(ncfac.GetNumFeatureDefs() == 2)
i += 1
def testIncludeOnly(self):
cfac = ChemicalFeatures.BuildFeatureFactory(os.path.join(RDConfig.RDBaseDir,
'Code','GraphMol','MolChemicalFeatures','test_data','featDef.txt'))
self.failUnless(cfac.GetNumFeatureDefs() == 2)
mol = Chem.MolFromSmiles("COCN")
rdDistGeom.EmbedMolecule(mol)
self.failUnless(cfac.GetNumMolFeatures(mol,includeOnly="HBondAcceptor") == 2)
self.failUnless(cfac.GetNumMolFeatures(mol,includeOnly="HBondDonor") == 1)
self.failUnless(cfac.GetNumMolFeatures(mol,includeOnly="Bogus") == 0)
self.failUnlessRaises(IndexError,lambda : cfac.GetMolFeature(mol,1,includeOnly="HBondDonor"))
self.failUnlessRaises(IndexError,lambda : cfac.GetMolFeature(mol,2,includeOnly="HBondAcceptor"))
f = cfac.GetMolFeature(mol,0,includeOnly="HBondDonor")
self.failUnless(f.GetFamily()=='HBondDonor')
feats = cfac.GetFeaturesForMol(mol,includeOnly="HBondAcceptor")
self.failUnless(len(feats) == 2)
feats = cfac.GetFeaturesForMol(mol,includeOnly="HBondDonor")
self.failUnless(len(feats) == 1)
feats = cfac.GetFeaturesForMol(mol,includeOnly="Bogus")
self.failUnless(len(feats) == 0)
def testStringParse(self):
fdefBlock = \
"""DefineFeature HDonor1 [N,O;!H0]
Family HBondDonor
Weights 1.0
EndFeature
DefineFeature HAcceptor1 [N,O;H0]
Family HBondAcceptor
Weights 1.0
EndFeature
"""
cfac = ChemicalFeatures.BuildFeatureFactoryFromString(fdefBlock)
self.failUnless(cfac.GetNumFeatureDefs() == 2)
def testStringParse2(self):
fdefBlock = \
"""DefineFeature HDonor1 [N,O;!H0]\r
Family HBondDonor\r
Weights 1.0\r
EndFeature\r
DefineFeature HAcceptor1 [N,O;H0]\r
Family HBondAcceptor\r
Weights 1.0\r
EndFeature\r
"""
cfac = ChemicalFeatures.BuildFeatureFactoryFromString(fdefBlock)
self.failUnless(cfac.GetNumFeatureDefs() == 2)
def testParseErrorHandling(self):
fdefBlock = \
"""DefineFeature HDonor1 [N,O;!HQ]
Family HBondDonor
Weights 1.0
EndFeature
"""
self.failUnlessRaises(ValueError,
lambda:ChemicalFeatures.BuildFeatureFactoryFromString(fdefBlock))
fdefBlock = \
"""DefineFeature HDonor1 [N,O;!H0]
Family HBondDonor
Weights 1.0
"""
self.failUnlessRaises(ValueError,
lambda:ChemicalFeatures.BuildFeatureFactoryFromString(fdefBlock))
self.failUnlessRaises(IOError,
lambda:ChemicalFeatures.BuildFeatureFactory('noSuchFile.txt'))
def testAtomMatch(self):
fdefBlock = \
"""
DefineFeature HAcceptor1 [#7,#8]
Family HBondAcceptor
Weights 1.0
EndFeature
DefineFeature Arom1 a1aaaaa1
Family Aromatic
Weights 1.0,1.0,1.0,1.0,1.0,1.0
EndFeature
"""
cfac = ChemicalFeatures.BuildFeatureFactoryFromString(fdefBlock)
self.failUnless(cfac.GetNumFeatureDefs() == 2)
mol = Chem.MolFromSmiles('n1ccccc1')
feats = cfac.GetFeaturesForMol(mol)
self.failUnless(len(feats)==2)
m = ChemicalFeatures.GetAtomMatch(feats)
self.failIf(m)
mol = Chem.MolFromSmiles('c1ccccc1N')
feats = cfac.GetFeaturesForMol(mol)
self.failUnless(len(feats)==2)
m = ChemicalFeatures.GetAtomMatch(feats)
self.failUnless(len(m)==2)
def testIssue231(self):
fdefs = """
DefineFeature HDonor1 [N,O;!H0]
Family HBondDonor
Weights 1.0
EndFeature
DefineFeature HAcceptor1 [N,O;H0]
Family HBondAcceptor
Weights 1.0
EndFeature
"""
cfac = ChemicalFeatures.BuildFeatureFactoryFromString(fdefs)
m = Chem.MolFromSmiles('O=CCCN')
rdDistGeom.EmbedMolecule(m)
feats = cfac.GetFeaturesForMol(m)
for feat in feats:
feat.GetPos()
m = None
for feat in feats:
feat.GetPos()
if __name__ == '__main__':
unittest.main()
| rdkit/rdkit-orig | Code/GraphMol/MolChemicalFeatures/Wrap/testChemicalFeatures.py | Python | bsd-3-clause | 5,970 | ["RDKit"] | 95e6bf863344f5b1cf9ef365fcbaf4151b7dbfef9c1400df2d0bb6313ec4570c |
|
# -*- coding: utf-8 -*-
u"""Traits-based GUI for head-MRI coregistration.
Hierarchy
---------
This is the hierarchy of classes for control. Brackets like [1] denote
properties that are set to be equivalent.
::
CoregFrame: GUI for head-MRI coregistration.
|-- CoregModel (model): Traits object for estimating the head mri transform.
| |-- MRIHeadWithFiducialsModel (mri) [1]: Represent an MRI head shape (high and low res) with fiducials.
| | |-- SurfaceSource (bem_high_res): High-res MRI head
| | |-- SurfaceSource (bem_low_res): Low-res MRI head
| | +-- MRISubjectSource (subject_source) [2]: Find subjects in SUBJECTS_DIR and select one.
| |-- FiducialsSource (fid): Expose points of a given fiducials fif file.
| +-- DigSource (hsp): Expose measurement information from an inst file.
|-- MlabSceneModel (scene) [3]: mayavi.core.ui.mayavi_scene
|-- DataPanel (data_panel)
| |-- HeadViewController (headview) [4]: Set head views for the given coordinate system.
| | +-- MlabSceneModel (scene) [3*]: ``HeadViewController(scene=CoregFrame.scene)``
| |-- SubjectSelectorPanel (subject_panel): Subject selector panel
| | +-- MRISubjectSource (model) [2*]: ``SubjectSelectorPanel(model=self.model.mri.subject_source)``
| +-- FiducialsPanel (fid_panel): Set fiducials on an MRI surface.
| |-- MRIHeadWithFiducialsModel (model) [1*]: ``FiducialsPanel(model=CoregFrame.model.mri, headview=CoregFrame.headview)``
| |-- HeadViewController (headview) [4*]: ``FiducialsPanel(model=CoregFrame.model.mri, headview=CoregFrame.headview)``
| +-- SurfaceObject (hsp_obj) [5*]: ``CoregFrame.fid_panel.hsp_obj = CoregFrame.mri_obj``
|-- CoregPanel (coreg_panel): Coregistration panel for Head<->MRI with scaling.
| +-- FittingOptionsPanel (fitting_options_panel): panel for fitting options.
|-- SurfaceObject (mri_obj) [5]: Represent a solid object in a mayavi scene.
+-- PointObject ({hsp, eeg, lpa, nasion, rpa, hsp_lpa, hsp_nasion, hsp_rpa} + _obj): Represent a group of individual points in a mayavi scene.
In the MRI viewing frame, MRI points are transformed via scaling, then by
mri_head_t to the Neuromag head coordinate frame. Digitized points (already in
the head coordinate frame) are never transformed.
Units
-----
User-facing GUI values are in readable units:
- ``scale_*`` are in %
- ``trans_*`` are in mm
- ``rot_*`` are in °
Internal computation quantities ``parameters`` are in units of (for X/Y/Z):
- ``parameters[:3]`` are in radians
- ``parameters[3:6]`` are in m
- ``parameters[6:9]`` are in scale proportion
Conversions are handled via `np.deg2rad`, `np.rad2deg`, and appropriate
multiplications / divisions.
""" # noqa: E501
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
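# Example of the unit conventions above (illustrative, not from the source):
#   rot_x = 30 (deg in GUI)  <->  parameters[0] = np.deg2rad(30)   (radians)
#   trans_x = 5 (mm in GUI)  <->  parameters[3] = 5e-3             (meters)
#   scale_x = 102 (% in GUI) <->  parameters[6] = 1.02             (proportion)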
import os
import queue
import re
from threading import Thread
import traceback
import warnings
import numpy as np
from mayavi.core.ui.mayavi_scene import MayaviScene
from mayavi.tools.mlab_scene_model import MlabSceneModel
from pyface.api import (error, confirm, OK, YES, NO, CANCEL, information,
FileDialog, GUI)
from traits.api import (Bool, Button, cached_property, DelegatesTo, Directory,
Enum, Float, HasTraits, HasPrivateTraits, Instance,
Int, on_trait_change, Property, Str, List, RGBColor)
from traitsui.api import (View, Item, Group, HGroup, VGroup, VGrid, EnumEditor,
Handler, Label, Spring, InstanceEditor, StatusItem,
UIInfo)
from traitsui.menu import Action, UndoButton, CancelButton, NoButtons
from tvtk.pyface.scene_editor import SceneEditor
from ..bem import make_bem_solution, write_bem_solution
from ..coreg import bem_fname, trans_fname
from ..defaults import DEFAULTS
from ..surface import _DistanceQuery
from ..transforms import (write_trans, read_trans, apply_trans, rotation,
rotation_angles, Transform, _ensure_trans,
rot_to_quat, _angle_between_quats)
from ..coreg import fit_matched_points, scale_mri, _find_fiducials_files
from ..viz.backends._pysurfer_mayavi import _toggle_mlab_render
from ..utils import logger, set_config, _pl
from ._fiducials_gui import MRIHeadWithFiducialsModel, FiducialsPanel
from ._file_traits import trans_wildcard, DigSource, SubjectSelectorPanel
from ._viewer import (HeadViewController, PointObject, SurfaceObject,
_DEG_WIDTH, _MM_WIDTH, _BUTTON_WIDTH,
_SHOW_BORDER, _COREG_WIDTH, _SCALE_STEP_WIDTH,
_INC_BUTTON_WIDTH, _SCALE_WIDTH, _WEIGHT_WIDTH,
_MM_STEP_WIDTH, _DEG_STEP_WIDTH, _REDUCED_TEXT_WIDTH,
_RESET_LABEL, _RESET_WIDTH,
laggy_float_editor_scale, laggy_float_editor_deg,
laggy_float_editor_mm, laggy_float_editor_weight)
defaults = DEFAULTS['coreg']
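# Identity-transform defaults for the nine coregistration parameters:
# three rotations (rad), three translations (m), three scales (proportion).
# Assumed value; the constant is referenced below but not defined in this
# excerpt.
_DEFAULT_PARAMETERS = (0., 0., 0., 0., 0., 0., 1., 1., 1.)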
class busy(object):
"""Set the GUI state to busy."""
def __enter__(self): # noqa: D105
GUI.set_busy(True)
def __exit__(self, type, value, traceback): # noqa: D105
GUI.set_busy(False)
def _pass(x):
"""Format text without changing it."""
return x
class CoregModel(HasPrivateTraits):
"""Traits object for estimating the head mri transform.
Notes
-----
Transform from head to mri space is modelled with the following steps:
* move the head shape to its nasion position
* rotate the head shape with user defined rotation around its nasion
* move the head shape by user defined translation
* move the head shape origin to the mri nasion
If MRI scaling is enabled,
* the MRI is scaled relative to its origin center (prior to any
transformation of the digitizer head)
Don't sync transforms to anything to prevent them from being recomputed
upon every parameter change.
"""
# data sources
mri = Instance(MRIHeadWithFiducialsModel, ())
hsp = Instance(DigSource, ())
# parameters
guess_mri_subject = Bool(True) # change MRI subject when dig file changes
grow_hair = Float(label=u"ΔHair", desc="Move the back of the MRI "
"head outwards to compensate for hair on the digitizer "
"head shape (mm)")
n_scale_params = Enum(0, 1, 3, desc="Scale the MRI to better fit the "
"subject's head shape (a new MRI subject will be "
"created with a name specified upon saving)")
scale_x = Float(100, label="X")
scale_y = Float(100, label="Y")
scale_z = Float(100, label="Z")
trans_x = Float(0, label=u"ΔX")
trans_y = Float(0, label=u"ΔY")
trans_z = Float(0, label=u"ΔZ")
rot_x = Float(0, label=u"∠X")
rot_y = Float(0, label=u"∠Y")
rot_z = Float(0, label=u"∠Z")
parameters = List()
last_parameters = List()
lpa_weight = Float(1.)
nasion_weight = Float(10.)
rpa_weight = Float(1.)
hsp_weight = Float(1.)
eeg_weight = Float(1.)
hpi_weight = Float(1.)
iteration = Int(-1)
icp_iterations = Int(20)
icp_angle = Float(0.2)
icp_distance = Float(0.2)
icp_scale = Float(0.2)
icp_fid_match = Enum('nearest', 'matched')
fit_icp_running = Bool(False)
fits_icp_running = Bool(False)
coord_frame = Enum('mri', 'head', desc='Display coordinate frame')
status_text = Str()
# options during scaling
scale_labels = Bool(True, desc="whether to scale *.label files")
copy_annot = Bool(True, desc="whether to copy *.annot files for scaled "
"subject")
prepare_bem_model = Bool(True, desc="whether to run mne_prepare_bem_model "
"after scaling the MRI")
# secondary to parameters
has_nasion_data = Property(
Bool, depends_on=['mri:nasion', 'hsp:nasion'])
has_lpa_data = Property(
Bool, depends_on=['mri:lpa', 'hsp:lpa'])
has_rpa_data = Property(
Bool, depends_on=['mri:rpa', 'hsp:rpa'])
has_fid_data = Property( # conjunction
Bool, depends_on=['has_nasion_data', 'has_lpa_data', 'has_rpa_data'])
has_mri_data = Property(
Bool, depends_on=['transformed_high_res_mri_points'])
has_hsp_data = Property(
Bool, depends_on=['has_mri_data', 'hsp:points'])
has_eeg_data = Property(
Bool, depends_on=['has_mri_data', 'hsp:eeg_points'])
has_hpi_data = Property(
Bool, depends_on=['has_mri_data', 'hsp:hpi_points'])
n_icp_points = Property(
Int, depends_on=['has_nasion_data', 'nasion_weight',
'has_lpa_data', 'lpa_weight',
'has_rpa_data', 'rpa_weight',
'hsp:points', 'hsp_weight',
'hsp:eeg_points', 'eeg_weight',
'hsp:hpi_points', 'hpi_weight'])
    changes = Property(depends_on=['parameters[]', 'last_parameters[]'])
# target transforms
mri_head_t = Property(
desc="Transformation of the scaled MRI to the head coordinate frame.",
depends_on=['parameters[]'])
head_mri_t = Property(depends_on=['mri_head_t'])
mri_trans = Property(depends_on=['mri_head_t', 'parameters[]',
'coord_frame'])
hsp_trans = Property(depends_on=['head_mri_t', 'coord_frame'])
# info
subject_has_bem = DelegatesTo('mri')
lock_fiducials = DelegatesTo('mri')
can_prepare_bem_model = Property(
Bool,
depends_on=['n_scale_params', 'subject_has_bem'])
can_save = Property(Bool, depends_on=['mri_head_t'])
raw_subject = Property(
desc="Subject guess based on the raw file name.",
depends_on=['hsp:inst_fname'])
# MRI geometry transformed to viewing coordinate system
processed_high_res_mri_points = Property(
depends_on=['mri:bem_high_res:surf', 'grow_hair'])
processed_low_res_mri_points = Property(
depends_on=['mri:bem_low_res:surf', 'grow_hair'])
transformed_high_res_mri_points = Property(
depends_on=['processed_high_res_mri_points', 'mri_trans'])
transformed_low_res_mri_points = Property(
depends_on=['processed_low_res_mri_points', 'mri_trans'])
nearest_calc = Property(
Instance(_DistanceQuery),
depends_on=['transformed_high_res_mri_points'])
nearest_transformed_high_res_mri_idx_lpa = Property(
depends_on=['nearest_calc', 'transformed_hsp_lpa'])
nearest_transformed_high_res_mri_idx_nasion = Property(
depends_on=['nearest_calc', 'transformed_hsp_nasion'])
nearest_transformed_high_res_mri_idx_rpa = Property(
depends_on=['nearest_calc', 'transformed_hsp_rpa'])
nearest_transformed_high_res_mri_idx_hsp = Property(
depends_on=['nearest_calc', 'transformed_hsp_points'])
nearest_transformed_high_res_mri_idx_orig_hsp = Property(
depends_on=['nearest_calc', 'transformed_orig_hsp_points'])
nearest_transformed_high_res_mri_idx_eeg = Property(
depends_on=['nearest_calc', 'transformed_hsp_eeg_points'])
nearest_transformed_high_res_mri_idx_hpi = Property(
depends_on=['nearest_calc', 'transformed_hsp_hpi'])
transformed_mri_lpa = Property(
depends_on=['mri:lpa', 'mri_trans'])
transformed_mri_nasion = Property(
depends_on=['mri:nasion', 'mri_trans'])
transformed_mri_rpa = Property(
depends_on=['mri:rpa', 'mri_trans'])
# HSP geometry transformed to viewing coordinate system
transformed_hsp_points = Property(
depends_on=['hsp:points', 'hsp_trans'])
transformed_orig_hsp_points = Property(
depends_on=['hsp:_hsp_points', 'hsp_trans'])
transformed_hsp_lpa = Property(
depends_on=['hsp:lpa', 'hsp_trans'])
transformed_hsp_nasion = Property(
depends_on=['hsp:nasion', 'hsp_trans'])
transformed_hsp_rpa = Property(
depends_on=['hsp:rpa', 'hsp_trans'])
transformed_hsp_eeg_points = Property(
depends_on=['hsp:eeg_points', 'hsp_trans'])
transformed_hsp_hpi = Property(
depends_on=['hsp:hpi', 'hsp_trans'])
# fit properties
lpa_distance = Property(
depends_on=['transformed_mri_lpa', 'transformed_hsp_lpa'])
nasion_distance = Property(
depends_on=['transformed_mri_nasion', 'transformed_hsp_nasion'])
rpa_distance = Property(
depends_on=['transformed_mri_rpa', 'transformed_hsp_rpa'])
point_distance = Property( # use low res points
depends_on=['nearest_transformed_high_res_mri_idx_hsp',
'nearest_transformed_high_res_mri_idx_eeg',
'nearest_transformed_high_res_mri_idx_hpi',
'hsp_weight',
'eeg_weight',
'hpi_weight'])
orig_hsp_point_distance = Property( # use low res points
depends_on=['nearest_transformed_high_res_mri_idx_orig_hsp',
'hpi_weight'])
# fit property info strings
fid_eval_str = Property(
depends_on=['lpa_distance', 'nasion_distance', 'rpa_distance'])
points_eval_str = Property(
depends_on=['point_distance'])
def _parameters_default(self):
return list(_DEFAULT_PARAMETERS)
def _last_parameters_default(self):
return list(_DEFAULT_PARAMETERS)
@cached_property
def _get_can_prepare_bem_model(self):
return self.subject_has_bem and self.n_scale_params > 0
@cached_property
def _get_can_save(self):
return np.any(self.mri_head_t != np.eye(4))
@cached_property
def _get_has_lpa_data(self):
return (np.any(self.mri.lpa) and np.any(self.hsp.lpa))
@cached_property
def _get_has_nasion_data(self):
return (np.any(self.mri.nasion) and np.any(self.hsp.nasion))
@cached_property
def _get_has_rpa_data(self):
return (np.any(self.mri.rpa) and np.any(self.hsp.rpa))
@cached_property
def _get_has_fid_data(self):
return self.has_nasion_data and self.has_lpa_data and self.has_rpa_data
@cached_property
def _get_has_mri_data(self):
return len(self.transformed_high_res_mri_points) > 0
@cached_property
def _get_has_hsp_data(self):
return (self.has_mri_data and
len(self.nearest_transformed_high_res_mri_idx_hsp) > 0)
@cached_property
def _get_has_eeg_data(self):
return (self.has_mri_data and
len(self.nearest_transformed_high_res_mri_idx_eeg) > 0)
@cached_property
def _get_has_hpi_data(self):
return (self.has_mri_data and
len(self.nearest_transformed_high_res_mri_idx_hpi) > 0)
@cached_property
def _get_n_icp_points(self):
"""Get parameters for an ICP iteration."""
n = (self.hsp_weight > 0) * len(self.hsp.points)
for key in ('lpa', 'nasion', 'rpa'):
if getattr(self, 'has_%s_data' % key):
n += 1
n += (self.eeg_weight > 0) * len(self.hsp.eeg_points)
n += (self.hpi_weight > 0) * len(self.hsp.hpi_points)
return n
@cached_property
def _get_changes(self):
new = np.array(self.parameters, float)
old = np.array(self.last_parameters, float)
move = np.linalg.norm(old[3:6] - new[3:6]) * 1e3
angle = np.rad2deg(_angle_between_quats(
rot_to_quat(rotation(*new[:3])[:3, :3]),
rot_to_quat(rotation(*old[:3])[:3, :3])))
percs = 100 * (new[6:] - old[6:]) / old[6:]
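        # units follow the GUI conventions: move in mm, angle in degrees,
        # percs in percent change per axis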
return move, angle, percs
@cached_property
def _get_mri_head_t(self):
# rotate and translate hsp
trans = rotation(*self.parameters[:3])
trans[:3, 3] = np.array(self.parameters[3:6])
return trans
@cached_property
def _get_head_mri_t(self):
trans = rotation(*self.parameters[:3]).T
trans[:3, 3] = -np.dot(trans[:3, :3], self.parameters[3:6])
# should be the same as np.linalg.inv(self.mri_head_t)
return trans
@cached_property
def _get_processed_high_res_mri_points(self):
return self._get_processed_mri_points('high')
@cached_property
def _get_processed_low_res_mri_points(self):
return self._get_processed_mri_points('low')
def _get_processed_mri_points(self, res):
bem = self.mri.bem_low_res if res == 'low' else self.mri.bem_high_res
if self.grow_hair:
if len(bem.surf.nn):
scaled_hair_dist = (1e-3 * self.grow_hair /
np.array(self.parameters[6:9]))
points = bem.surf.rr.copy()
hair = points[:, 2] > points[:, 1]
points[hair] += bem.surf.nn[hair] * scaled_hair_dist
return points
else:
error(None, "Norms missing from bem, can't grow hair")
self.grow_hair = 0
        # no hair growth (or norms missing): return the raw surface points
        return bem.surf.rr
@cached_property
def _get_mri_trans(self):
mri_scaling = np.ones(4)
mri_scaling[:3] = self.parameters[6:9]
if self.coord_frame == 'head':
t = self.mri_head_t
else:
t = np.eye(4)
return t * mri_scaling
@cached_property
def _get_hsp_trans(self):
if self.coord_frame == 'head':
t = np.eye(4)
else:
t = self.head_mri_t
return t
@cached_property
def _get_nearest_transformed_high_res_mri_idx_lpa(self):
return self.nearest_calc.query(self.transformed_hsp_lpa)[1]
@cached_property
def _get_nearest_transformed_high_res_mri_idx_nasion(self):
return self.nearest_calc.query(self.transformed_hsp_nasion)[1]
@cached_property
def _get_nearest_transformed_high_res_mri_idx_rpa(self):
return self.nearest_calc.query(self.transformed_hsp_rpa)[1]
@cached_property
def _get_nearest_transformed_high_res_mri_idx_hsp(self):
return self.nearest_calc.query(self.transformed_hsp_points)[1]
@cached_property
def _get_nearest_transformed_high_res_mri_idx_orig_hsp(self):
# This is redundant to some extent with the one above due to
# overlapping points, but it's fast and the refactoring to
# remove redundancy would be a pain.
return self.nearest_calc.query(self.transformed_orig_hsp_points)[1]
@cached_property
def _get_nearest_transformed_high_res_mri_idx_eeg(self):
return self.nearest_calc.query(self.transformed_hsp_eeg_points)[1]
@cached_property
def _get_nearest_transformed_high_res_mri_idx_hpi(self):
return self.nearest_calc.query(self.transformed_hsp_hpi)[1]
# MRI view-transformed data
@cached_property
def _get_transformed_low_res_mri_points(self):
points = apply_trans(self.mri_trans,
self.processed_low_res_mri_points)
return points
@cached_property
def _get_nearest_calc(self):
return _DistanceQuery(self.transformed_high_res_mri_points)
@cached_property
def _get_transformed_high_res_mri_points(self):
points = apply_trans(self.mri_trans,
self.processed_high_res_mri_points)
return points
@cached_property
def _get_transformed_mri_lpa(self):
return apply_trans(self.mri_trans, self.mri.lpa)
@cached_property
def _get_transformed_mri_nasion(self):
return apply_trans(self.mri_trans, self.mri.nasion)
@cached_property
def _get_transformed_mri_rpa(self):
return apply_trans(self.mri_trans, self.mri.rpa)
# HSP view-transformed data
@cached_property
def _get_transformed_hsp_points(self):
return apply_trans(self.hsp_trans, self.hsp.points)
@cached_property
def _get_transformed_orig_hsp_points(self):
return apply_trans(self.hsp_trans, self.hsp._hsp_points)
@cached_property
def _get_transformed_hsp_lpa(self):
return apply_trans(self.hsp_trans, self.hsp.lpa)
@cached_property
def _get_transformed_hsp_nasion(self):
return apply_trans(self.hsp_trans, self.hsp.nasion)
@cached_property
def _get_transformed_hsp_rpa(self):
return apply_trans(self.hsp_trans, self.hsp.rpa)
@cached_property
def _get_transformed_hsp_eeg_points(self):
return apply_trans(self.hsp_trans, self.hsp.eeg_points)
@cached_property
def _get_transformed_hsp_hpi(self):
return apply_trans(self.hsp_trans, self.hsp.hpi_points)
# Distances, etc.
@cached_property
def _get_lpa_distance(self):
d = np.ravel(self.transformed_mri_lpa - self.transformed_hsp_lpa)
return np.linalg.norm(d)
@cached_property
def _get_nasion_distance(self):
d = np.ravel(self.transformed_mri_nasion - self.transformed_hsp_nasion)
return np.linalg.norm(d)
@cached_property
def _get_rpa_distance(self):
d = np.ravel(self.transformed_mri_rpa - self.transformed_hsp_rpa)
return np.linalg.norm(d)
@cached_property
def _get_point_distance(self):
mri_points = list()
hsp_points = list()
if self.hsp_weight > 0 and self.has_hsp_data:
mri_points.append(self.transformed_high_res_mri_points[
self.nearest_transformed_high_res_mri_idx_hsp])
hsp_points.append(self.transformed_hsp_points)
if self.eeg_weight > 0 and self.has_eeg_data:
mri_points.append(self.transformed_high_res_mri_points[
self.nearest_transformed_high_res_mri_idx_eeg])
hsp_points.append(self.transformed_hsp_eeg_points)
if self.hpi_weight > 0 and self.has_hpi_data:
mri_points.append(self.transformed_high_res_mri_points[
self.nearest_transformed_high_res_mri_idx_hpi])
hsp_points.append(self.transformed_hsp_hpi)
if all(len(h) == 0 for h in hsp_points):
return None
mri_points = np.concatenate(mri_points)
hsp_points = np.concatenate(hsp_points)
return np.linalg.norm(mri_points - hsp_points, axis=-1)
@cached_property
def _get_orig_hsp_point_distance(self):
mri_points = self.transformed_high_res_mri_points[
self.nearest_transformed_high_res_mri_idx_orig_hsp]
hsp_points = self.transformed_orig_hsp_points
return np.linalg.norm(mri_points - hsp_points, axis=-1)
@cached_property
def _get_fid_eval_str(self):
d = (self.lpa_distance * 1000, self.nasion_distance * 1000,
self.rpa_distance * 1000)
return u'Fiducials: %.1f, %.1f, %.1f mm' % d
@cached_property
def _get_points_eval_str(self):
if self.point_distance is None:
return ""
dists = 1000 * self.point_distance
av_dist = np.mean(dists)
std_dist = np.std(dists)
kinds = [kind for kind, check in
(('HSP', self.hsp_weight > 0 and self.has_hsp_data),
('EEG', self.eeg_weight > 0 and self.has_eeg_data),
('HPI', self.hpi_weight > 0 and self.has_hpi_data))
if check]
return (u"%s %s: %.1f ± %.1f mm"
% (len(dists), '+'.join(kinds), av_dist, std_dist))
def _get_raw_subject(self):
# subject name guessed based on the inst file name
if '_' in self.hsp.inst_fname:
subject, _ = self.hsp.inst_fname.split('_', 1)
if subject:
return subject
@on_trait_change('raw_subject')
def _on_raw_subject_change(self, subject):
if self.guess_mri_subject:
if subject in self.mri.subject_source.subjects:
self.mri.subject = subject
elif 'fsaverage' in self.mri.subject_source.subjects:
self.mri.subject = 'fsaverage'
def omit_hsp_points(self, distance):
"""Exclude head shape points that are far away from the MRI head.
Parameters
----------
distance : float
Exclude all points that are further away from the MRI head than
            this distance. The filter is recomputed from the original head
            shape points, so it replaces any previous omission. A value of
            distance <= 0 excludes nothing.
"""
distance = float(distance)
if distance <= 0:
return
# find the new filter
mask = self.orig_hsp_point_distance <= distance
n_excluded = np.sum(~mask)
logger.info("Coregistration: Excluding %i head shape points with "
"distance >= %.3f m.", n_excluded, distance)
# set the filter
with warnings.catch_warnings(record=True): # comp to None in Traits
self.hsp.points_filter = mask
def fit_fiducials(self, n_scale_params=None):
"""Find rotation and translation to fit all 3 fiducials."""
if n_scale_params is None:
n_scale_params = self.n_scale_params
head_pts = np.vstack((self.hsp.lpa, self.hsp.nasion, self.hsp.rpa))
mri_pts = np.vstack((self.mri.lpa, self.mri.nasion, self.mri.rpa))
weights = [self.lpa_weight, self.nasion_weight, self.rpa_weight]
assert n_scale_params in (0, 1) # guaranteed by GUI
if n_scale_params == 0:
mri_pts *= self.parameters[6:9] # not done in fit_matched_points
x0 = np.array(self.parameters[:6 + n_scale_params])
est = fit_matched_points(mri_pts, head_pts, x0=x0, out='params',
scale=n_scale_params, weights=weights)
if n_scale_params == 0:
self.parameters[:6] = est
else:
self.parameters[:] = np.concatenate([est, [est[-1]] * 2])
def _setup_icp(self, n_scale_params):
"""Get parameters for an ICP iteration."""
head_pts = list()
mri_pts = list()
weights = list()
if self.has_hsp_data and self.hsp_weight > 0: # should be true
head_pts.append(self.hsp.points)
mri_pts.append(self.processed_high_res_mri_points[
self.nearest_transformed_high_res_mri_idx_hsp])
weights.append(np.full(len(head_pts[-1]), self.hsp_weight))
for key in ('lpa', 'nasion', 'rpa'):
if getattr(self, 'has_%s_data' % key):
head_pts.append(getattr(self.hsp, key))
if self.icp_fid_match == 'matched':
mri_pts.append(getattr(self.mri, key))
else:
assert self.icp_fid_match == 'nearest'
mri_pts.append(self.processed_high_res_mri_points[
getattr(self, 'nearest_transformed_high_res_mri_idx_%s'
% (key,))])
weights.append(np.full(len(mri_pts[-1]),
getattr(self, '%s_weight' % key)))
if self.has_eeg_data and self.eeg_weight > 0:
head_pts.append(self.hsp.eeg_points)
mri_pts.append(self.processed_high_res_mri_points[
self.nearest_transformed_high_res_mri_idx_eeg])
weights.append(np.full(len(mri_pts[-1]), self.eeg_weight))
if self.has_hpi_data and self.hpi_weight > 0:
head_pts.append(self.hsp.hpi_points)
mri_pts.append(self.processed_high_res_mri_points[
self.nearest_transformed_high_res_mri_idx_hpi])
weights.append(np.full(len(mri_pts[-1]), self.hpi_weight))
head_pts = np.concatenate(head_pts)
mri_pts = np.concatenate(mri_pts)
weights = np.concatenate(weights)
if n_scale_params == 0:
mri_pts *= self.parameters[6:9] # not done in fit_matched_points
return head_pts, mri_pts, weights
def fit_icp(self, n_scale_params=None):
"""Find MRI scaling, translation, and rotation to match HSP."""
if n_scale_params is None:
n_scale_params = self.n_scale_params
# Initial guess (current state)
assert n_scale_params in (0, 1, 3)
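        # slice end maps n_scale_params 0/1/3 -> 6/7/9 parameters
        # (rigid only / rigid + uniform scale / rigid + per-axis scale)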
est = self.parameters[:[6, 7, None, 9][n_scale_params]]
# Do the fits, assigning and evaluating at each step
attr = 'fit_icp_running' if n_scale_params == 0 else 'fits_icp_running'
setattr(self, attr, True)
GUI.process_events() # update the cancel button
for self.iteration in range(self.icp_iterations):
head_pts, mri_pts, weights = self._setup_icp(n_scale_params)
est = fit_matched_points(mri_pts, head_pts, scale=n_scale_params,
x0=est, out='params', weights=weights)
if n_scale_params == 0:
self.parameters[:6] = est
elif n_scale_params == 1:
self.parameters[:] = list(est) + [est[-1]] * 2
else:
self.parameters[:] = est
            move, angle, scale = self.changes  # (move, angle, percs) per _get_changes
if angle <= self.icp_angle and move <= self.icp_distance and \
all(scale <= self.icp_scale):
self.status_text = self.status_text[:-1] + '; converged)'
break
if not getattr(self, attr): # canceled by user
self.status_text = self.status_text[:-1] + '; cancelled)'
break
GUI.process_events() # this will update the head view
else:
self.status_text = self.status_text[:-1] + '; did not converge)'
setattr(self, attr, False)
self.iteration = -1
def get_scaling_job(self, subject_to, skip_fiducials):
"""Find all arguments needed for the scaling worker."""
subjects_dir = self.mri.subjects_dir
subject_from = self.mri.subject
bem_names = []
if self.can_prepare_bem_model and self.prepare_bem_model:
pattern = bem_fname.format(subjects_dir=subjects_dir,
subject=subject_from, name='(.+-bem)')
bem_dir, pattern = os.path.split(pattern)
for filename in os.listdir(bem_dir):
match = re.match(pattern, filename)
if match:
bem_names.append(match.group(1))
return (subjects_dir, subject_from, subject_to, self.parameters[6:9],
skip_fiducials, self.scale_labels, self.copy_annot, bem_names)
def load_trans(self, fname):
"""Load the head-mri transform from a fif file.
Parameters
----------
fname : str
File path.
"""
self.set_trans(_ensure_trans(read_trans(fname, return_all=True),
'mri', 'head')['trans'])
def reset(self):
"""Reset all the parameters affecting the coregistration."""
with busy():
            self.reset_traits(('grow_hair', 'n_scale_params'))
self.parameters[:] = _DEFAULT_PARAMETERS
self.omit_hsp_points(np.inf)
def set_trans(self, mri_head_t):
"""Set rotation and translation params from a transformation matrix.
Parameters
----------
mri_head_t : array, shape (4, 4)
Transformation matrix from MRI to head space.
"""
with busy():
rot_x, rot_y, rot_z = rotation_angles(mri_head_t)
x, y, z = mri_head_t[:3, 3]
self.parameters[:6] = [rot_x, rot_y, rot_z, x, y, z]
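    # A hedged illustration of the decomposition above: for an identity
    # matrix np.eye(4), rotation_angles() yields (0, 0, 0) and the extracted
    # translation column is (0, 0, 0), so all six parameters become zero.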
def save_trans(self, fname):
"""Save the head-mri transform as a fif file.
Parameters
----------
fname : str
Target file path.
"""
if not self.can_save:
raise RuntimeError("Not enough information for saving transform")
write_trans(fname, Transform('head', 'mri', self.head_mri_t))
def _parameters_items_changed(self):
# Update GUI as necessary
n_scale = self.n_scale_params
for ii, key in enumerate(('rot_x', 'rot_y', 'rot_z')):
val = np.rad2deg(self.parameters[ii])
if val != getattr(self, key): # prevent circular
setattr(self, key, val)
for ii, key in enumerate(('trans_x', 'trans_y', 'trans_z')):
val = self.parameters[ii + 3] * 1e3
if val != getattr(self, key): # prevent circular
setattr(self, key, val)
for ii, key in enumerate(('scale_x', 'scale_y', 'scale_z')):
val = self.parameters[ii + 6] * 1e2
if val != getattr(self, key): # prevent circular
setattr(self, key, val)
# Update the status text
move, angle, percs = self.changes
text = u'Change: Δ=%0.1f mm ∠=%0.2f°' % (move, angle)
if n_scale:
text += ' Scale ' if n_scale == 1 else ' Sx/y/z '
text += '/'.join(['%+0.1f%%' % p for p in percs[:n_scale]])
if self.iteration >= 0:
text += u' (iteration %d/%d)' % (self.iteration + 1,
self.icp_iterations)
self.last_parameters[:] = self.parameters[:]
self.status_text = text
def _rot_x_changed(self):
self.parameters[0] = np.deg2rad(self.rot_x)
def _rot_y_changed(self):
self.parameters[1] = np.deg2rad(self.rot_y)
def _rot_z_changed(self):
self.parameters[2] = np.deg2rad(self.rot_z)
def _trans_x_changed(self):
self.parameters[3] = self.trans_x * 1e-3
def _trans_y_changed(self):
self.parameters[4] = self.trans_y * 1e-3
def _trans_z_changed(self):
self.parameters[5] = self.trans_z * 1e-3
def _scale_x_changed(self):
if self.n_scale_params == 1:
self.parameters[6:9] = [self.scale_x * 1e-2] * 3
else:
self.parameters[6] = self.scale_x * 1e-2
def _scale_y_changed(self):
self.parameters[7] = self.scale_y * 1e-2
def _scale_z_changed(self):
self.parameters[8] = self.scale_z * 1e-2
class CoregFrameHandler(Handler):
"""Check for unfinished processes before closing its window."""
def object_title_changed(self, info):
"""Set the title when it gets changed."""
info.ui.title = info.object.title
def close(self, info, is_ok):
"""Handle the close event."""
if info.object.queue.unfinished_tasks:
            information(None, "Cannot close the window while saving is "
                        "still in progress. Please wait until all MRIs "
                        "are processed.", "Saving Still in Progress")
return False
else:
try: # works on Qt only for now
size = (info.ui.control.width(), info.ui.control.height())
except AttributeError:
size = None
# store configuration, but don't prevent from closing on error
try:
info.object.save_config(size=size)
except Exception as exc:
warnings.warn("Error saving GUI configuration:\n%s" % (exc,))
return True
class CoregPanelHandler(Handler):
"""Open other windows with proper parenting."""
info = Instance(UIInfo)
def object_fitting_options_panel_changed(self, info): # noqa: D102
self.info = info
def object_fitting_options_changed(self, info): # noqa: D102
self.info.object.fitting_options_panel.edit_traits(
parent=self.info.ui.control)
def object_load_trans_changed(self, info): # noqa: D102
# find trans file destination
model = self.info.object.model
raw_dir = os.path.dirname(model.hsp.file)
subject = model.mri.subject
trans_file = trans_fname.format(raw_dir=raw_dir, subject=subject)
dlg = FileDialog(action="open", wildcard=trans_wildcard,
default_path=trans_file, parent=self.info.ui.control)
if dlg.open() != OK:
return
trans_file = dlg.path
try:
model.load_trans(trans_file)
except Exception as e:
error(None, "Error loading trans file %s: %s (See terminal "
"for details)" % (trans_file, e), "Error Loading Trans File")
raise
def object_save_changed(self, info): # noqa: D102
obj = self.info.object
subjects_dir = obj.model.mri.subjects_dir
subject_from = obj.model.mri.subject
# check that fiducials are saved
skip_fiducials = False
if obj.n_scale_params and not _find_fiducials_files(subject_from,
subjects_dir):
msg = ("No fiducials file has been found for {src}. If fiducials "
"are not saved, they will not be available in the scaled "
"MRI. Should the current fiducials be saved now? "
"Select Yes to save the fiducials at "
"{src}/bem/{src}-fiducials.fif. "
"Select No to proceed scaling the MRI without fiducials.".
format(src=subject_from))
title = "Save Fiducials for %s?" % subject_from
rc = confirm(self.info.ui.control, msg, title, cancel=True,
default=CANCEL)
if rc == CANCEL:
return
elif rc == YES:
obj.model.mri.save(obj.model.mri.default_fid_fname)
elif rc == NO:
skip_fiducials = True
else:
raise RuntimeError("rc=%s" % repr(rc))
# find target subject
if obj.n_scale_params:
subject_to = obj.model.raw_subject or subject_from
mridlg = NewMriDialog(subjects_dir=subjects_dir,
subject_from=subject_from,
subject_to=subject_to)
ui = mridlg.edit_traits(kind='modal',
parent=self.info.ui.control)
if not ui.result: # i.e., user pressed cancel
return
subject_to = mridlg.subject_to
else:
subject_to = subject_from
# find trans file destination
raw_dir = os.path.dirname(obj.model.hsp.file)
trans_file = trans_fname.format(raw_dir=raw_dir, subject=subject_to)
dlg = FileDialog(action="save as", wildcard=trans_wildcard,
default_path=trans_file,
parent=self.info.ui.control)
dlg.open()
if dlg.return_code != OK:
return
trans_file = dlg.path
if not trans_file.endswith('.fif'):
trans_file += '.fif'
if os.path.exists(trans_file):
            answer = confirm(None, "The file %r already exists. Should it "
                             "be replaced?" % trans_file, "Overwrite File?")
if answer != YES:
return
# save the trans file
try:
obj.model.save_trans(trans_file)
except Exception as e:
error(None, "Error saving -trans.fif file: %s (See terminal for "
"details)" % (e,), "Error Saving Trans File")
raise
# save the scaled MRI
if obj.n_scale_params:
job = obj.model.get_scaling_job(subject_to, skip_fiducials)
obj.queue.put(job)
obj.queue_len += 1
def _make_view_data_panel(scrollable=False):
view = View(VGroup(
VGroup(Item('subject_panel', style='custom'), label="MRI Subject",
show_border=_SHOW_BORDER, show_labels=False),
VGroup(Item('lock_fiducials', style='custom',
editor=EnumEditor(cols=2, values={False: '2:Edit',
True: '1:Lock'}),
enabled_when='fid_ok'),
HGroup(Item('hsp_always_visible',
label='Show head shape points', show_label=True,
enabled_when='not lock_fiducials', width=-1),
show_left=False),
Item('fid_panel', style='custom'), label="MRI Fiducials",
show_border=_SHOW_BORDER, show_labels=False),
VGroup(Item('raw_src', style="custom"),
HGroup('guess_mri_subject',
Label('Guess subject from name'), show_labels=False),
VGrid(Item('grow_hair', editor=laggy_float_editor_mm,
width=_MM_WIDTH),
Label(u'ΔHair', show_label=True, width=-1), '0',
Item('distance', show_label=False, width=_MM_WIDTH,
editor=laggy_float_editor_mm),
Item('omit_points', width=_BUTTON_WIDTH),
Item('reset_omit_points', width=_RESET_WIDTH),
columns=3, show_labels=False),
Item('omitted_info', style='readonly',
width=_REDUCED_TEXT_WIDTH), label='Digitization source',
show_border=_SHOW_BORDER, show_labels=False),
VGroup(HGroup(Item('headview', style='custom'), Spring(),
show_labels=False),
Item('view_options', width=_REDUCED_TEXT_WIDTH),
label='View', show_border=_SHOW_BORDER, show_labels=False),
Spring(),
show_labels=False), kind='panel', buttons=[UndoButton],
scrollable=scrollable, handler=DataPanelHandler())
return view
def _make_view_coreg_panel(scrollable=False):
"""Generate View for CoregPanel."""
view = View(VGroup(
# Scaling
HGroup(Item('n_scale_params', label='Scaling mode',
editor=EnumEditor(values={0: '1:None',
1: '2:Uniform',
3: '3:3-axis'})), Spring()),
VGrid(Item('scale_x', editor=laggy_float_editor_scale,
show_label=True, tooltip="Scale along right-left axis (%)",
enabled_when='n_scale_params > 0', width=_SCALE_WIDTH),
Item('scale_x_dec', enabled_when='n_scale_params > 0',
width=_INC_BUTTON_WIDTH),
Item('scale_x_inc', enabled_when='n_scale_params > 0',
width=_INC_BUTTON_WIDTH),
Item('scale_step', tooltip="Scaling step (%)",
enabled_when='n_scale_params > 0', width=_SCALE_STEP_WIDTH),
Spring(),
Item('scale_y', editor=laggy_float_editor_scale, show_label=True,
enabled_when='n_scale_params > 1',
tooltip="Scale along anterior-posterior axis (%)",
width=_SCALE_WIDTH),
Item('scale_y_dec', enabled_when='n_scale_params > 1',
width=_INC_BUTTON_WIDTH),
Item('scale_y_inc', enabled_when='n_scale_params > 1',
width=_INC_BUTTON_WIDTH),
Label('(Step)', width=_SCALE_WIDTH),
Spring(),
Item('scale_z', editor=laggy_float_editor_scale, show_label=True,
enabled_when='n_scale_params > 1', width=_SCALE_WIDTH,
tooltip="Scale along anterior-posterior axis (%)"),
Item('scale_z_dec', enabled_when='n_scale_params > 1',
width=_INC_BUTTON_WIDTH),
Item('scale_z_inc', enabled_when='n_scale_params > 1',
width=_INC_BUTTON_WIDTH),
'0',
Spring(),
label='Scaling parameters', show_labels=False, columns=5,
show_border=_SHOW_BORDER),
VGrid(Item('fits_icp', enabled_when='n_scale_params > 0 and '
'n_icp_points >= 10',
tooltip="Rotate, translate, and scale the MRI to minimize "
"the distance from each digitizer point to the closest MRI "
"point (one ICP iteration)", width=_BUTTON_WIDTH),
Item('fits_fid', enabled_when='n_scale_params == 1 and '
'has_fid_data',
tooltip="Rotate, translate, and scale the MRI to minimize "
"the distance of the three fiducials.",
width=_BUTTON_WIDTH),
Item('cancels_icp', enabled_when="fits_icp_running",
tooltip='Stop ICP fitting', width=_RESET_WIDTH),
Item('reset_scale', enabled_when='n_scale_params',
tooltip="Reset scaling parameters", width=_RESET_WIDTH),
show_labels=False, columns=4),
# Translation and rotation
VGrid(Item('trans_x', editor=laggy_float_editor_mm, show_label=True,
tooltip="Move along right-left axis", width=_MM_WIDTH),
Item('trans_x_dec', width=_INC_BUTTON_WIDTH),
Item('trans_x_inc', width=_INC_BUTTON_WIDTH),
Item('trans_step', tooltip="Movement step (mm)",
width=_MM_STEP_WIDTH),
Spring(),
Item('trans_y', editor=laggy_float_editor_mm, show_label=True,
tooltip="Move along anterior-posterior axis",
width=_MM_WIDTH),
Item('trans_y_dec', width=_INC_BUTTON_WIDTH),
Item('trans_y_inc', width=_INC_BUTTON_WIDTH),
Label('(Step)', width=_MM_WIDTH),
Spring(),
Item('trans_z', editor=laggy_float_editor_mm, show_label=True,
tooltip="Move along anterior-posterior axis",
width=_MM_WIDTH),
Item('trans_z_dec', width=_INC_BUTTON_WIDTH),
Item('trans_z_inc', width=_INC_BUTTON_WIDTH),
'0',
Spring(),
Item('rot_x', editor=laggy_float_editor_deg, show_label=True,
tooltip="Tilt the digitization backward (-) or forward (+)",
width=_DEG_WIDTH),
Item('rot_x_dec', width=_INC_BUTTON_WIDTH),
Item('rot_x_inc', width=_INC_BUTTON_WIDTH),
Item('rot_step', tooltip=u"Rotation step (°)",
width=_DEG_STEP_WIDTH),
Spring(),
Item('rot_y', editor=laggy_float_editor_deg, show_label=True,
tooltip="Tilt the digitization rightward (-) or "
"leftward (+)", width=_DEG_WIDTH),
Item('rot_y_dec', width=_INC_BUTTON_WIDTH),
Item('rot_y_inc', width=_INC_BUTTON_WIDTH),
Label('(Step)', width=_DEG_WIDTH),
Spring(),
Item('rot_z', editor=laggy_float_editor_deg, show_label=True,
tooltip="Turn the digitization leftward (-) or "
"rightward (+)", width=_DEG_WIDTH),
Item('rot_z_dec', width=_INC_BUTTON_WIDTH),
Item('rot_z_inc', width=_INC_BUTTON_WIDTH),
'0',
Spring(),
columns=5, show_labels=False, show_border=_SHOW_BORDER,
label=u'Translation (Δ) and Rotation (∠)'),
VGroup(Item('fit_icp', enabled_when='n_icp_points >= 10',
tooltip="Rotate and translate the MRI to minimize the "
"distance from each digitizer point to the closest MRI "
"point (one ICP iteration)", width=_BUTTON_WIDTH),
Item('fit_fid', enabled_when="has_fid_data",
tooltip="Rotate and translate the MRI to minimize the "
"distance of the three fiducials.", width=_BUTTON_WIDTH),
Item('cancel_icp', enabled_when="fit_icp_running",
tooltip='Stop ICP iterations', width=_RESET_WIDTH),
Item('reset_tr', tooltip="Reset translation and rotation.",
width=_RESET_WIDTH),
show_labels=False, columns=4),
# Fitting weights
Item('fid_eval_str', style='readonly', tooltip='Fiducial differences',
width=_REDUCED_TEXT_WIDTH),
Item('points_eval_str', style='readonly',
tooltip='Point error (μ ± σ)', width=_REDUCED_TEXT_WIDTH),
Item('fitting_options', width=_REDUCED_TEXT_WIDTH, show_label=False),
VGrid(Item('scale_labels', label="Scale label files",
enabled_when='n_scale_params > 0'),
Item('copy_annot', label="Copy annotation files",
enabled_when='n_scale_params > 0'),
Item('prepare_bem_model', label="Prepare BEM",
enabled_when='can_prepare_bem_model'),
show_left=False, label='Subject-saving options', columns=1,
show_border=_SHOW_BORDER),
VGrid(Item('save', enabled_when='can_save',
tooltip="Save the trans file and (if scaling is enabled) "
"the scaled MRI", width=_BUTTON_WIDTH),
Item('load_trans', width=_BUTTON_WIDTH,
tooltip="Load Head<->MRI trans file"),
Item('reset_params', tooltip="Reset all coregistration "
"parameters", width=_RESET_WIDTH),
show_labels=False, columns=3),
Spring(),
show_labels=False), kind='panel', buttons=[UndoButton],
scrollable=scrollable, handler=CoregPanelHandler())
return view
class FittingOptionsPanel(HasTraits):
"""View options panel."""
model = Instance(CoregModel)
lpa_weight = DelegatesTo('model')
nasion_weight = DelegatesTo('model')
rpa_weight = DelegatesTo('model')
hsp_weight = DelegatesTo('model')
eeg_weight = DelegatesTo('model')
hpi_weight = DelegatesTo('model')
has_lpa_data = DelegatesTo('model')
has_nasion_data = DelegatesTo('model')
has_rpa_data = DelegatesTo('model')
has_hsp_data = DelegatesTo('model')
has_eeg_data = DelegatesTo('model')
has_hpi_data = DelegatesTo('model')
icp_iterations = DelegatesTo('model')
icp_angle = DelegatesTo('model')
icp_distance = DelegatesTo('model')
icp_scale = DelegatesTo('model')
icp_fid_match = DelegatesTo('model')
n_scale_params = DelegatesTo('model')
view = View(VGroup(
VGrid(HGroup(Item('icp_iterations', label='Iterations',
width=_MM_WIDTH, tooltip='Maximum ICP iterations to '
'perform (per click)'),
Spring(), show_labels=True), label='ICP iterations (max)',
show_border=_SHOW_BORDER),
VGrid(Item('icp_angle', label=u'Angle (°)', width=_MM_WIDTH,
tooltip='Angle convergence threshold'),
Item('icp_distance', label='Distance (mm)', width=_MM_WIDTH,
tooltip='Distance convergence threshold'),
Item('icp_scale', label='Scale (%)',
tooltip='Scaling convergence threshold', width=_MM_WIDTH,
enabled_when='n_scale_params > 0'),
show_labels=True, label='ICP convergence limits', columns=3,
show_border=_SHOW_BORDER),
VGrid(Item('icp_fid_match', width=-1, show_label=False,
editor=EnumEditor(values=dict(
nearest='1:Closest to surface',
matched='2:MRI fiducials'), cols=2,
format_func=lambda x: x),
tooltip='Match digitization fiducials to MRI fiducials or '
'the closest surface point', style='custom'),
label='Fiducial point matching', show_border=_SHOW_BORDER),
VGrid(
VGrid(Item('lpa_weight', editor=laggy_float_editor_weight,
tooltip="Relative weight for LPA", width=_WEIGHT_WIDTH,
enabled_when='has_lpa_data', label='LPA'),
Item('nasion_weight', editor=laggy_float_editor_weight,
tooltip="Relative weight for nasion", label='Nasion',
width=_WEIGHT_WIDTH, enabled_when='has_nasion_data'),
Item('rpa_weight', editor=laggy_float_editor_weight,
tooltip="Relative weight for RPA", width=_WEIGHT_WIDTH,
enabled_when='has_rpa_data', label='RPA'),
columns=3, show_labels=True, show_border=_SHOW_BORDER,
label='Fiducials'),
VGrid(Item('hsp_weight', editor=laggy_float_editor_weight,
tooltip="Relative weight for head shape points",
enabled_when='has_hsp_data',
label='HSP', width=_WEIGHT_WIDTH,),
Item('eeg_weight', editor=laggy_float_editor_weight,
tooltip="Relative weight for EEG points", label='EEG',
enabled_when='has_eeg_data', width=_WEIGHT_WIDTH),
Item('hpi_weight', editor=laggy_float_editor_weight,
tooltip="Relative weight for HPI points", label='HPI',
enabled_when='has_hpi_data', width=_WEIGHT_WIDTH),
columns=3, show_labels=True, show_border=_SHOW_BORDER,
label='Other points (closest-point matched)'),
show_labels=False, label='Point weights', columns=2,
show_border=_SHOW_BORDER),
), title="Fitting options")
_DEFAULT_PARAMETERS = (0., 0., 0., 0., 0., 0., 1., 1., 1.)
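# A hedged note on the parameter layout used throughout this module:
# entries [0:3] are rotations in radians, [3:6] translations in meters and
# [6:9] per-axis scale factors, matching the rad2deg / *1e3 (mm) / *1e2 (%)
# conversions performed in _parameters_items_changed above.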
class CoregPanel(HasPrivateTraits):
"""Coregistration panel for Head<->MRI with scaling."""
model = Instance(CoregModel)
# parameters
reset_params = Button(label=_RESET_LABEL)
n_scale_params = DelegatesTo('model')
parameters = DelegatesTo('model')
scale_step = Float(1.)
scale_x = DelegatesTo('model')
scale_x_dec = Button('-')
scale_x_inc = Button('+')
scale_y = DelegatesTo('model')
scale_y_dec = Button('-')
scale_y_inc = Button('+')
scale_z = DelegatesTo('model')
scale_z_dec = Button('-')
scale_z_inc = Button('+')
rot_step = Float(1.)
rot_x = DelegatesTo('model')
rot_x_dec = Button('-')
rot_x_inc = Button('+')
rot_y = DelegatesTo('model')
rot_y_dec = Button('-')
rot_y_inc = Button('+')
rot_z = DelegatesTo('model')
rot_z_dec = Button('-')
rot_z_inc = Button('+')
trans_step = Float(1.)
trans_x = DelegatesTo('model')
trans_x_dec = Button('-')
trans_x_inc = Button('+')
trans_y = DelegatesTo('model')
trans_y_dec = Button('-')
trans_y_inc = Button('+')
trans_z = DelegatesTo('model')
trans_z_dec = Button('-')
trans_z_inc = Button('+')
# fitting
has_lpa_data = DelegatesTo('model')
has_nasion_data = DelegatesTo('model')
has_rpa_data = DelegatesTo('model')
has_fid_data = DelegatesTo('model')
has_hsp_data = DelegatesTo('model')
has_eeg_data = DelegatesTo('model')
has_hpi_data = DelegatesTo('model')
n_icp_points = DelegatesTo('model')
# fitting with scaling
fits_icp = Button(label='Fit (ICP)')
fits_fid = Button(label='Fit Fid.')
    cancels_icp = Button(label=u'■')
reset_scale = Button(label=_RESET_LABEL)
fits_icp_running = DelegatesTo('model')
# fitting without scaling
fit_icp = Button(label='Fit (ICP)')
fit_fid = Button(label='Fit Fid.')
cancel_icp = Button(label=u'■')
reset_tr = Button(label=_RESET_LABEL)
fit_icp_running = DelegatesTo('model')
# fit info
fid_eval_str = DelegatesTo('model')
points_eval_str = DelegatesTo('model')
# saving
can_prepare_bem_model = DelegatesTo('model')
can_save = DelegatesTo('model')
scale_labels = DelegatesTo('model')
copy_annot = DelegatesTo('model')
prepare_bem_model = DelegatesTo('model')
save = Button(label="Save...")
load_trans = Button(label='Load...')
queue = Instance(queue.Queue, ())
queue_feedback = Str('')
queue_current = Str('')
queue_len = Int(0)
queue_status_text = Property(
Str, depends_on=['queue_feedback', 'queue_current', 'queue_len'])
fitting_options_panel = Instance(FittingOptionsPanel)
fitting_options = Button('Fitting options...')
def _fitting_options_panel_default(self):
return FittingOptionsPanel(model=self.model)
view = _make_view_coreg_panel()
def __init__(self, *args, **kwargs): # noqa: D102
super(CoregPanel, self).__init__(*args, **kwargs)
# Setup scaling worker
def worker():
while True:
(subjects_dir, subject_from, subject_to, scale, skip_fiducials,
include_labels, include_annot, bem_names) = self.queue.get()
self.queue_len -= 1
# Scale MRI files
self.queue_current = 'Scaling %s...' % subject_to
try:
scale_mri(subject_from, subject_to, scale, True,
subjects_dir, skip_fiducials, include_labels,
include_annot)
except Exception:
logger.error('Error scaling %s:\n' % subject_to +
traceback.format_exc())
self.queue_feedback = ('Error scaling %s (see Terminal)' %
subject_to)
bem_names = () # skip bem solutions
else:
self.queue_feedback = 'Done scaling %s' % subject_to
# Precompute BEM solutions
for bem_name in bem_names:
self.queue_current = ('Computing %s solution...' %
bem_name)
try:
bem_file = bem_fname.format(subjects_dir=subjects_dir,
subject=subject_to,
name=bem_name)
bemsol = make_bem_solution(bem_file)
write_bem_solution(bem_file[:-4] + '-sol.fif', bemsol)
except Exception:
logger.error('Error computing %s solution:\n' %
bem_name + traceback.format_exc())
self.queue_feedback = ('Error computing %s solution '
'(see Terminal)' % bem_name)
else:
self.queue_feedback = ('Done computing %s solution' %
bem_name)
# Finalize
self.queue_current = ''
self.queue.task_done()
t = Thread(target=worker)
t.daemon = True
t.start()
@cached_property
def _get_queue_status_text(self):
items = []
if self.queue_current:
items.append(self.queue_current)
if self.queue_feedback:
items.append(self.queue_feedback)
if self.queue_len:
items.append("%i queued" % self.queue_len)
return ' | '.join(items)
@cached_property
def _get_rotation(self):
rot = np.array([self.rot_x, self.rot_y, self.rot_z])
return rot
@cached_property
def _get_translation(self):
trans = np.array([self.trans_x, self.trans_y, self.trans_z])
return trans
def _n_scale_params_fired(self):
if self.n_scale_params == 0:
use = [1] * 3
elif self.n_scale_params == 1:
use = [np.mean([self.scale_x, self.scale_y, self.scale_z]) /
100.] * 3
else:
use = self.parameters[6:9]
self.parameters[6:9] = use
def _fit_fid_fired(self):
with busy():
self.model.fit_fiducials(0)
def _fit_icp_fired(self):
with busy():
self.model.fit_icp(0)
def _fits_fid_fired(self):
with busy():
self.model.fit_fiducials()
def _fits_icp_fired(self):
with busy():
self.model.fit_icp()
def _cancel_icp_fired(self):
self.fit_icp_running = False
def _cancels_icp_fired(self):
self.fits_icp_running = False
def _reset_scale_fired(self):
self.reset_traits(('scale_x', 'scale_y', 'scale_z'))
def _reset_tr_fired(self):
self.reset_traits(('trans_x', 'trans_y', 'trans_z',
'rot_x', 'rot_y', 'rot_z'))
def _reset_params_fired(self):
self.model.reset()
def _rot_x_dec_fired(self):
self.rot_x -= self.rot_step
def _rot_x_inc_fired(self):
self.rot_x += self.rot_step
def _rot_y_dec_fired(self):
self.rot_y -= self.rot_step
def _rot_y_inc_fired(self):
self.rot_y += self.rot_step
def _rot_z_dec_fired(self):
self.rot_z -= self.rot_step
def _rot_z_inc_fired(self):
self.rot_z += self.rot_step
def _scale_x_dec_fired(self):
self.scale_x -= self.scale_step
def _scale_x_inc_fired(self):
self.scale_x += self.scale_step
def _scale_y_dec_fired(self):
self.scale_y -= self.scale_step
def _scale_y_inc_fired(self):
self.scale_y += self.scale_step
def _scale_z_dec_fired(self):
self.scale_z -= self.scale_step
def _scale_z_inc_fired(self):
self.scale_z += self.scale_step
def _trans_x_dec_fired(self):
self.trans_x -= self.trans_step
def _trans_x_inc_fired(self):
self.trans_x += self.trans_step
def _trans_y_dec_fired(self):
self.trans_y -= self.trans_step
def _trans_y_inc_fired(self):
self.trans_y += self.trans_step
def _trans_z_dec_fired(self):
self.trans_z -= self.trans_step
def _trans_z_inc_fired(self):
self.trans_z += self.trans_step
class NewMriDialog(HasPrivateTraits):
"""New MRI dialog."""
# Dialog to determine target subject name for a scaled MRI
subjects_dir = Directory
subject_to = Str
subject_from = Str
subject_to_dir = Property(depends_on=['subjects_dir', 'subject_to'])
subject_to_exists = Property(Bool, depends_on='subject_to_dir')
feedback = Str(' ' * 100)
can_overwrite = Bool
overwrite = Bool
can_save = Bool
view = View(Item('subject_to', label='New MRI Subject Name', tooltip="A "
"new folder with this name will be created in the "
"current subjects_dir for the scaled MRI files"),
Item('feedback', show_label=False, style='readonly'),
Item('overwrite', enabled_when='can_overwrite', tooltip="If a "
"subject with the chosen name exists, delete the old "
"subject"),
buttons=[CancelButton,
Action(name='OK', enabled_when='can_save')])
def _can_overwrite_changed(self, new):
if not new:
self.overwrite = False
@cached_property
def _get_subject_to_dir(self):
return os.path.join(self.subjects_dir, self.subject_to)
@cached_property
def _get_subject_to_exists(self):
if not self.subject_to:
return False
elif os.path.exists(self.subject_to_dir):
return True
else:
return False
@on_trait_change('subject_to_dir,overwrite')
def update_dialog(self):
if not self.subject_from:
# weird trait state that occurs even when subject_from is set
return
elif not self.subject_to:
self.feedback = "No subject specified..."
self.can_save = False
self.can_overwrite = False
elif self.subject_to == self.subject_from:
self.feedback = "Must be different from MRI source subject..."
self.can_save = False
self.can_overwrite = False
elif self.subject_to_exists:
if self.overwrite:
self.feedback = "%s will be overwritten." % self.subject_to
self.can_save = True
self.can_overwrite = True
else:
self.feedback = "Subject already exists..."
self.can_save = False
self.can_overwrite = True
else:
self.feedback = "Name ok."
self.can_save = True
self.can_overwrite = False
def _make_view(tabbed=False, split=False, width=800, height=600,
scrollable=True):
"""Create a view for the CoregFrame."""
# Set the width to 0.99 to "push out" as much as possible, use
# scene_width in the View below
scene = Item('scene', show_label=False, width=0.99,
editor=SceneEditor(scene_class=MayaviScene))
data_panel = VGroup(
Item('data_panel', style='custom',
width=_COREG_WIDTH if scrollable else 1,
editor=InstanceEditor(view=_make_view_data_panel(scrollable))),
label='Data', show_border=not scrollable, show_labels=False)
# Setting `scrollable=True` for a Group does not seem to have any effect
# (macOS), in order to be effective the parameter has to be set for a View
# object; hence we use a special InstanceEditor to set the parameter
# programmatically:
coreg_panel = VGroup(
Item('coreg_panel', style='custom',
width=_COREG_WIDTH if scrollable else 1,
editor=InstanceEditor(view=_make_view_coreg_panel(scrollable))),
label="Coregistration", show_border=not scrollable, show_labels=False,
enabled_when="data_panel.fid_panel.locked")
main_layout = 'split' if split else 'normal'
if tabbed:
main = HGroup(scene,
Group(data_panel, coreg_panel, show_labels=False,
layout='tabbed'),
layout=main_layout)
else:
main = HGroup(data_panel, scene, coreg_panel, show_labels=False,
layout=main_layout)
    # The width and height given here are only initial values; the window
    # remains freely resizable afterwards (resizable=True below)
view = View(main, resizable=True, handler=CoregFrameHandler(),
buttons=NoButtons, width=width, height=height,
statusbar=[StatusItem('status_text', width=0.55),
StatusItem('queue_status_text', width=0.45)])
return view
class ViewOptionsPanel(HasTraits):
"""View options panel."""
mri_obj = Instance(SurfaceObject)
hsp_obj = Instance(PointObject)
eeg_obj = Instance(PointObject)
hpi_obj = Instance(PointObject)
hsp_cf_obj = Instance(PointObject)
mri_cf_obj = Instance(PointObject)
bgcolor = RGBColor()
coord_frame = Enum('mri', 'head', label='Display coordinate frame')
head_high_res = Bool(True, label='Show high-resolution head')
advanced_rendering = Bool(True, label='Use advanced OpenGL',
desc='Enable advanced OpenGL methods that do '
'not work with all renderers (e.g., depth '
'peeling)')
view = View(
VGroup(
Item('mri_obj', style='custom', label="MRI"),
Item('hsp_obj', style='custom', label="Head shape"),
Item('eeg_obj', style='custom', label='EEG'),
Item('hpi_obj', style='custom', label='HPI'),
VGrid(Item('coord_frame', style='custom',
editor=EnumEditor(values={'mri': '1:MRI',
'head': '2:Head'}, cols=2,
format_func=_pass)),
Item('head_high_res'), Spring(),
Item('advanced_rendering'),
Spring(), Spring(), columns=3, show_labels=True),
Item('hsp_cf_obj', style='custom', label='Head axes'),
Item('mri_cf_obj', style='custom', label='MRI axes'),
HGroup(Item('bgcolor', label='Background'), Spring()),
), title="Display options")
class DataPanelHandler(Handler):
"""Open other windows with proper parenting."""
info = Instance(UIInfo)
def object_view_options_panel_changed(self, info): # noqa: D102
self.info = info
def object_view_options_changed(self, info): # noqa: D102
self.info.object.view_options_panel.edit_traits(
parent=self.info.ui.control)
class DataPanel(HasTraits):
"""Data loading panel."""
# Set by CoregPanel
model = Instance(CoregModel)
scene = Instance(MlabSceneModel, ())
lock_fiducials = DelegatesTo('model')
guess_mri_subject = DelegatesTo('model')
raw_src = DelegatesTo('model', 'hsp')
# Set internally
subject_panel = Instance(SubjectSelectorPanel)
fid_panel = Instance(FiducialsPanel)
headview = Instance(HeadViewController)
view_options_panel = Instance(ViewOptionsPanel)
hsp_always_visible = Bool(False, label="Always Show Head Shape")
view_options = Button(label="Display options...")
# Omit Points
distance = Float(10., desc="maximal distance for head shape points from "
"the surface (mm)")
    omit_points = Button(label='Omit', desc="to omit head shape points "
                         "farther than the given distance (mm) from the "
                         "scalp surface during automatic coregistration.")
grow_hair = DelegatesTo('model')
reset_omit_points = Button(label=_RESET_LABEL, desc="to reset the "
"omission of head shape points to include all.")
omitted_info = Str('No points omitted')
def _subject_panel_default(self):
return SubjectSelectorPanel(model=self.model.mri.subject_source)
def _fid_panel_default(self):
return FiducialsPanel(model=self.model.mri, headview=self.headview)
def _headview_default(self):
return HeadViewController(system='RAS', scene=self.scene)
def _omit_points_fired(self):
distance = self.distance / 1000.
self.model.omit_hsp_points(distance)
n_omitted = self.model.hsp.n_omitted
self.omitted_info = (
"%s pt%s omitted (%0.1f mm)"
% (n_omitted if n_omitted > 0 else 'No', _pl(n_omitted),
self.distance))
@on_trait_change('model:hsp:file')
def _file_change(self):
self._reset_omit_points_fired()
def _reset_omit_points_fired(self):
self.model.omit_hsp_points(np.inf)
self.omitted_info = 'No points omitted (reset)'
class CoregFrame(HasTraits):
"""GUI for head-MRI coregistration."""
model = Instance(CoregModel)
scene = Instance(MlabSceneModel, ())
head_high_res = Bool(True)
advanced_rendering = Bool(True)
data_panel = Instance(DataPanel)
coreg_panel = Instance(CoregPanel) # right panel
project_to_surface = DelegatesTo('eeg_obj')
orient_to_surface = DelegatesTo('hsp_obj')
scale_by_distance = DelegatesTo('hsp_obj')
mark_inside = DelegatesTo('hsp_obj')
status_text = DelegatesTo('model')
queue_status_text = DelegatesTo('coreg_panel')
fid_ok = DelegatesTo('model', 'mri.fid_ok')
lock_fiducials = DelegatesTo('model')
title = Str('MNE Coreg')
# visualization (MRI)
mri_obj = Instance(SurfaceObject)
mri_lpa_obj = Instance(PointObject)
mri_nasion_obj = Instance(PointObject)
mri_rpa_obj = Instance(PointObject)
bgcolor = RGBColor((0.5, 0.5, 0.5))
# visualization (Digitization)
hsp_obj = Instance(PointObject)
eeg_obj = Instance(PointObject)
hpi_obj = Instance(PointObject)
hsp_lpa_obj = Instance(PointObject)
hsp_nasion_obj = Instance(PointObject)
hsp_rpa_obj = Instance(PointObject)
hsp_visible = Property(depends_on=['data_panel:hsp_always_visible',
'lock_fiducials'])
# Coordinate frame axes
hsp_cf_obj = Instance(PointObject)
mri_cf_obj = Instance(PointObject)
picker = Instance(object)
# Processing
queue = DelegatesTo('coreg_panel')
view = _make_view()
def _model_default(self):
return CoregModel(
scale_labels=self._config.get(
'MNE_COREG_SCALE_LABELS', 'true') == 'true',
copy_annot=self._config.get(
'MNE_COREG_COPY_ANNOT', 'true') == 'true',
prepare_bem_model=self._config.get(
'MNE_COREG_PREPARE_BEM', 'true') == 'true')
def _data_panel_default(self):
return DataPanel(model=self.model, scene=self.scene)
def _coreg_panel_default(self):
return CoregPanel(model=self.model)
def __init__(self, raw=None, subject=None, subjects_dir=None,
guess_mri_subject=True, head_opacity=1.,
head_high_res=True, trans=None, config=None,
project_eeg=False, orient_to_surface=False,
scale_by_distance=False, mark_inside=False,
interaction='trackball', scale=0.16,
advanced_rendering=True): # noqa: D102
self._config = config or {}
super(CoregFrame, self).__init__(guess_mri_subject=guess_mri_subject,
head_high_res=head_high_res,
advanced_rendering=advanced_rendering)
self._initial_kwargs = dict(project_eeg=project_eeg,
orient_to_surface=orient_to_surface,
scale_by_distance=scale_by_distance,
mark_inside=mark_inside,
head_opacity=head_opacity,
interaction=interaction,
scale=scale)
self._locked_opacity = self._initial_kwargs['head_opacity']
if not 0 <= head_opacity <= 1:
raise ValueError(
"head_opacity needs to be a floating point number between 0 "
"and 1, got %r" % (head_opacity,))
if (subjects_dir is not None) and os.path.isdir(subjects_dir):
self.model.mri.subjects_dir = subjects_dir
if raw is not None:
self.model.hsp.file = raw
if subject is not None:
if subject not in self.model.mri.subject_source.subjects:
msg = "%s is not a valid subject. " % subject
# no subjects -> ['']
if any(self.model.mri.subject_source.subjects):
ss = ', '.join(self.model.mri.subject_source.subjects)
msg += ("The following subjects have been found: %s "
"(subjects_dir=%s). " %
(ss, self.model.mri.subjects_dir))
else:
msg += ("No subjects were found in subjects_dir=%s. " %
self.model.mri.subjects_dir)
msg += ("Make sure all MRI subjects have head shape files "
"(run $ mne make_scalp_surfaces).")
raise ValueError(msg)
self.model.mri.subject = subject
if trans is not None:
try:
self.model.load_trans(trans)
except Exception as e:
error(None, "Error loading trans file %s: %s (See terminal "
"for details)" % (trans, e), "Error Loading Trans File")
@on_trait_change('subject_panel:subject')
def _set_title(self):
self.title = '%s - MNE Coreg' % self.model.mri.subject
@on_trait_change('scene:activated')
def _init_plot(self):
_toggle_mlab_render(self, False)
self._on_advanced_rendering_change()
lpa_color = defaults['lpa_color']
nasion_color = defaults['nasion_color']
rpa_color = defaults['rpa_color']
# MRI scalp
#
# Due to MESA rendering / z-order bugs, this should be added and
# rendered first (see gh-5375).
color = defaults['head_color']
self.mri_obj = SurfaceObject(
points=np.empty((0, 3)), color=color, tris=np.empty((0, 3)),
scene=self.scene, name="MRI Scalp", block_behind=True,
# opacity=self._initial_kwargs['head_opacity'],
# setting opacity here causes points to be
# [[0, 0, 0]] -- why??
)
self.mri_obj.opacity = self._initial_kwargs['head_opacity']
self.data_panel.fid_panel.hsp_obj = self.mri_obj
self._update_mri_obj()
self.mri_obj.plot()
# Do not do sync_trait here, instead use notifiers elsewhere
# MRI Fiducials
point_scale = defaults['mri_fid_scale']
self.mri_lpa_obj = PointObject(scene=self.scene, color=lpa_color,
has_norm=True, point_scale=point_scale,
name='LPA')
self.model.sync_trait('transformed_mri_lpa',
self.mri_lpa_obj, 'points', mutual=False)
self.mri_nasion_obj = PointObject(scene=self.scene, color=nasion_color,
has_norm=True,
point_scale=point_scale,
name='Nasion')
self.model.sync_trait('transformed_mri_nasion',
self.mri_nasion_obj, 'points', mutual=False)
self.mri_rpa_obj = PointObject(scene=self.scene, color=rpa_color,
has_norm=True, point_scale=point_scale,
name='RPA')
self.model.sync_trait('transformed_mri_rpa',
self.mri_rpa_obj, 'points', mutual=False)
# Digitizer Head Shape
kwargs = dict(
view='cloud', scene=self.scene, resolution=20,
orient_to_surface=self._initial_kwargs['orient_to_surface'],
scale_by_distance=self._initial_kwargs['scale_by_distance'],
mark_inside=self._initial_kwargs['mark_inside'])
self.hsp_obj = PointObject(
color=defaults['extra_color'], name='Extra', has_norm=True,
point_scale=defaults['extra_scale'], **kwargs)
self.model.sync_trait('transformed_hsp_points',
self.hsp_obj, 'points', mutual=False)
# Digitizer EEG
self.eeg_obj = PointObject(
color=defaults['eeg_color'], point_scale=defaults['eeg_scale'],
name='EEG', projectable=True, has_norm=True,
project_to_surface=self._initial_kwargs['project_eeg'], **kwargs)
self.model.sync_trait('transformed_hsp_eeg_points',
self.eeg_obj, 'points', mutual=False)
# Digitizer HPI
self.hpi_obj = PointObject(
color=defaults['hpi_color'], name='HPI', has_norm=True,
point_scale=defaults['hpi_scale'], **kwargs)
self.model.sync_trait('transformed_hsp_hpi',
self.hpi_obj, 'points', mutual=False)
for p in (self.hsp_obj, self.eeg_obj, self.hpi_obj):
p.inside_color = self.mri_obj.color
self.mri_obj.sync_trait('color', p, 'inside_color',
mutual=False)
# Digitizer Fiducials
point_scale = defaults['dig_fid_scale']
opacity = defaults['dig_fid_opacity']
self.hsp_lpa_obj = PointObject(
scene=self.scene, color=lpa_color, opacity=opacity,
has_norm=True, point_scale=point_scale, name='HSP-LPA')
self.model.sync_trait('transformed_hsp_lpa',
self.hsp_lpa_obj, 'points', mutual=False)
self.hsp_nasion_obj = PointObject(
scene=self.scene, color=nasion_color, opacity=opacity,
has_norm=True, point_scale=point_scale, name='HSP-Nasion')
self.model.sync_trait('transformed_hsp_nasion',
self.hsp_nasion_obj, 'points', mutual=False)
self.hsp_rpa_obj = PointObject(
scene=self.scene, color=rpa_color, opacity=opacity,
has_norm=True, point_scale=point_scale, name='HSP-RPA')
self.model.sync_trait('transformed_hsp_rpa',
self.hsp_rpa_obj, 'points', mutual=False)
# All points share these
for p in (self.hsp_obj, self.eeg_obj, self.hpi_obj,
self.hsp_lpa_obj, self.hsp_nasion_obj, self.hsp_rpa_obj):
self.sync_trait('hsp_visible', p, 'visible', mutual=False)
on_pick = self.scene.mayavi_scene.on_mouse_pick
self.picker = on_pick(self.data_panel.fid_panel._on_pick, type='cell')
# Coordinate frame axes
self.mri_cf_obj = PointObject(
scene=self.scene, color=self.mri_obj.color,
opacity=self.mri_obj.opacity, label_scale=5e-3,
point_scale=0.02, name='MRI', view='arrow')
self.mri_obj.sync_trait('color', self.mri_cf_obj, mutual=False)
self._update_mri_axes()
self.hsp_cf_obj = PointObject(
scene=self.scene, color=self.hsp_obj.color,
opacity=self.mri_obj.opacity, label_scale=5e-3,
point_scale=0.02, name='Head', view='arrow')
        self.hsp_obj.sync_trait('color', self.hsp_cf_obj, mutual=False)
self._update_hsp_axes()
self.sync_trait('bgcolor', self.scene, 'background')
self._update_projections()
_toggle_mlab_render(self, True)
self.scene.render()
self.scene.camera.focal_point = (0., 0., 0.)
self.data_panel.view_options_panel = ViewOptionsPanel(
mri_obj=self.mri_obj, hsp_obj=self.hsp_obj,
eeg_obj=self.eeg_obj, hpi_obj=self.hpi_obj,
hsp_cf_obj=self.hsp_cf_obj, mri_cf_obj=self.mri_cf_obj,
head_high_res=self.head_high_res,
bgcolor=self.bgcolor, advanced_rendering=self.advanced_rendering)
self.data_panel.headview.scale = self._initial_kwargs['scale']
self.data_panel.headview.interaction = \
self._initial_kwargs['interaction']
self.data_panel.headview.left = True
self.data_panel.view_options_panel.sync_trait(
'coord_frame', self.model)
self.data_panel.view_options_panel.sync_trait('head_high_res', self)
self.data_panel.view_options_panel.sync_trait('advanced_rendering',
self)
self.data_panel.view_options_panel.sync_trait('bgcolor', self)
@on_trait_change('advanced_rendering')
def _on_advanced_rendering_change(self):
renderer = getattr(self.scene, 'renderer', None)
if renderer is None:
return
if self.advanced_rendering:
renderer.use_depth_peeling = 1
renderer.occlusion_ratio = 0.1
renderer.maximum_number_of_peels = 100
renderer.vtk_window.multi_samples = 0
renderer.vtk_window.alpha_bit_planes = 1
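            # Depth peeling requires multisampling disabled and alpha bit
            # planes enabled on the render window, hence the two settings
            # above (a hedged note based on VTK's documented requirements).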
else:
renderer.use_depth_peeling = 0
renderer.vtk_window.multi_samples = 8
renderer.vtk_window.alpha_bit_planes = 0
if hasattr(renderer, 'use_fxaa'):
self.scene.renderer.use_fxaa = True
self.scene.render()
@on_trait_change('lock_fiducials')
def _on_lock_change(self):
if not self.lock_fiducials:
if self.mri_obj is None:
self._initial_kwargs['head_opacity'] = 1.
else:
self._locked_opacity = self.mri_obj.opacity
self.mri_obj.opacity = 1.
else:
if self.mri_obj is not None:
self.mri_obj.opacity = self._locked_opacity
@cached_property
def _get_hsp_visible(self):
return self.data_panel.hsp_always_visible or self.lock_fiducials
@on_trait_change('model:mri_trans')
def _update_mri_axes(self):
if self.mri_cf_obj is None:
return
nn = apply_trans(self.model.mri_trans, np.eye(3), move=False)
pts = apply_trans(self.model.mri_trans, np.zeros((3, 3)))
self.mri_cf_obj.nn = nn
self.mri_cf_obj.points = pts
@on_trait_change('model:hsp_trans')
def _update_hsp_axes(self):
if self.hsp_cf_obj is None:
return
nn = apply_trans(self.model.hsp_trans, np.eye(3), move=False)
pts = apply_trans(self.model.hsp_trans, np.zeros((3, 3)))
self.hsp_cf_obj.nn = nn
self.hsp_cf_obj.points = pts
@on_trait_change('model:mri:bem_low_res:surf,'
'model:transformed_low_res_mri_points')
def _update_projections(self):
for p in (self.eeg_obj, self.hsp_obj, self.hpi_obj):
if p is not None:
p.project_to_tris = self.model.mri.bem_low_res.surf.tris
p.project_to_points = self.model.transformed_low_res_mri_points
@on_trait_change('model:mri:bem_low_res:surf,head_high_res,'
'model:transformed_high_res_mri_points')
def _update_mri_obj(self):
if self.mri_obj is None:
return
self.mri_obj.tris = getattr(
self.model.mri, 'bem_%s_res'
% ('high' if self.head_high_res else 'low',)).surf.tris
self.mri_obj.points = getattr(
self.model, 'transformed_%s_res_mri_points'
% ('high' if self.head_high_res else 'low',))
# automatically lock fiducials if a good fiducials file is loaded
@on_trait_change('model:mri:fid_file')
def _on_fid_file_loaded(self):
self.data_panel.fid_panel.locked = bool(self.model.mri.fid_file)
def save_config(self, home_dir=None, size=None):
"""Write configuration values."""
        def s_c(key, value, lower=True):
            value = str(value)
            if lower:
                value = value.lower()
            set_config(key, value, home_dir=home_dir, set_env=False)
s_c('MNE_COREG_GUESS_MRI_SUBJECT', self.model.guess_mri_subject)
s_c('MNE_COREG_HEAD_HIGH_RES', self.head_high_res)
s_c('MNE_COREG_ADVANCED_RENDERING', self.advanced_rendering)
if self.lock_fiducials:
opacity = self.mri_obj.opacity
else:
opacity = self._locked_opacity
s_c('MNE_COREG_HEAD_OPACITY', opacity)
if size is not None:
s_c('MNE_COREG_WINDOW_WIDTH', size[0])
s_c('MNE_COREG_WINDOW_HEIGHT', size[1])
s_c('MNE_COREG_SCENE_SCALE', self.data_panel.headview.scale)
s_c('MNE_COREG_SCALE_LABELS', self.model.scale_labels)
s_c('MNE_COREG_COPY_ANNOT', self.model.copy_annot)
s_c('MNE_COREG_PREPARE_BEM', self.model.prepare_bem_model)
if self.model.mri.subjects_dir:
s_c('MNE_COREG_SUBJECTS_DIR', self.model.mri.subjects_dir, False)
s_c('MNE_COREG_PROJECT_EEG', self.project_to_surface)
s_c('MNE_COREG_ORIENT_TO_SURFACE', self.orient_to_surface)
s_c('MNE_COREG_SCALE_BY_DISTANCE', self.scale_by_distance)
s_c('MNE_COREG_MARK_INSIDE', self.mark_inside)
s_c('MNE_COREG_INTERACTION', self.data_panel.headview.interaction)
|
adykstra/mne-python
|
mne/gui/_coreg_gui.py
|
Python
|
bsd-3-clause
| 87,039
|
[
"Mayavi"
] |
a9094be0c6d7f85bb2df500e2dd289d3e98af47a36ddfc12044656bd06ddb453
|
from gpaw.sphere.lebedev import run, weight_n, Y_nL, R_nv
weight0_n, Y0_nL, R0_nv = run()
assert (abs(weight0_n - weight_n).sum() +
abs(Y0_nL - Y_nL).sum() +
abs(R0_nv - R_nv).sum()) < 1e-13
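# A minimal quadrature sketch appended for illustration (assumption: the
# Lebedev weights are normalized to sum to one, with R_nv holding unit
# vectors at the quadrature points). The surface integral of a smooth f over
# the unit sphere is then approximated by 4*pi * sum_n weight_n * f(r_n);
# for f(r) = z**2 the exact value is 4*pi/3, which a rule exact for
# degree-2 polynomials reproduces to roundoff.
import numpy as np
f_n = R_nv[:, 2]**2
assert abs(4 * np.pi * np.dot(weight_n, f_n) - 4 * np.pi / 3) < 1e-10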
|
qsnake/gpaw
|
gpaw/test/lebedev.py
|
Python
|
gpl-3.0
| 209
|
[
"GPAW"
] |
27dd396035ff43d7453db401c5bf29011070de99c87ede3a0348b8b5f89bae75
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
class InputParameters:
def __init__(self, *args):
self.valid = {}
self.strict_types = {}
self.desc = {}
self.substitute = {}
self.required = set()
self.private = set()
self.group = {}
def addRequiredParam(self, name, *args):
self.required.add(name)
self.addParam(name, *args)
def addParam(self, name, *args):
if len(args) == 2:
self.valid[name] = args[0]
self.desc[name] = args[-1]
def addRequiredParamWithType(self, name, my_type, *args):
self.required.add(name)
self.addParamWithType(name, my_type, *args)
def addParamWithType(self, name, my_type, *args):
if len(args) == 3:
self.valid[name] = args[0]
self.strict_types[name] = my_type
self.desc[name] = args[-1]
def addPrivateParam(self, name, *args):
self.private.add(name)
if len(args) == 1:
self.valid[name] = args[0]
def addStringSubParam(self, name, substitution, *args):
self.substitute[name] = substitution
self.addParam(name, *args)
    def isValid(self, name):
        return (name in self.valid and self.valid[name] is not None and
                self.valid[name] != [])
def __contains__(self, item):
return item in self.desc
def __getitem__(self, key):
return self.valid[key]
def __setitem__(self, key, value):
self.valid[key] = value
##
# Adds parameters from another InputParameters object via += operator
# @param add_params The InputParameters object to merge into the existing object
def __iadd__(self, add_params):
# Loop through all possible parameters and perform the correct adding into
# this InputParameters object
for key in add_params.keys():
if add_params.isRequired(key):
self.addRequiredParam(key, add_params[key], add_params.desc[key])
elif add_params.isValid(key):
self.addParam(key, add_params[key], add_params.desc[key])
else:
self.addParam(key, add_params.desc[key])
# Return this InputParameters object
return self
def type(self, key):
if key in self.valid:
return type(self.valid[key])
else:
return None
def keys(self):
return set([k for k in self.desc])
def required_keys(self):
return self.required
def valid_keys(self):
return self.valid
def substitute_keys(self):
return self.substitute
def isRequired(self, key):
return key in self.required
def getDescription(self, key):
return self.desc[key]
##
# Specify a group name for the keys listed
# @param group The name of the group to create or append
# @param prop_list The list of property names (keys) to add to the group
def addParamsToGroup(self, group, prop_list):
# Check that the group is a string
if not isinstance(group, str):
print('ERROR: The supplied group name must be a string')
return
# Check that the prop_list is a list
if not isinstance(prop_list, list):
print('ERROR: The supplied properties must be supplied as a list')
return
# Create the storage for the group if it doesn't exist
if group not in self.group:
self.group[group] = []
# Append the list
self.group[group] += prop_list
##
# Extract the parameters names (keys) from a group
# @param group The name of the group to extract keys from
# @return The list of keys for the given group
def groupKeys(self, group):
return self.group[group]
##
# Apply common parameters to parameters for this object
# @param common The common InputParameters object to apply to these parameters
def applyParams(self, common):
if not isinstance(common, InputParameters):
            print('ERROR: Supplied "common" variable must be of type '
                  'InputParameters')
return
# Loop through the valid parameters in the common parameters,
# if they are not valid in this set, then apply them
for common_key in common.valid_keys():
if not self.isValid(common_key):
self[common_key] = common[common_key]
def printParams(self):
for k in self.desc:
value = ''
if k in self.valid:
value = self.valid[k]
print(k.ljust(20), value)
print(' '.ljust(20), self.desc[k])
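# A hedged usage sketch (not part of the MOOSE source): build a parameter
# set, merge a second set via the += operator, and print the result.
if __name__ == '__main__':
    params = InputParameters()
    params.addRequiredParam('mesh', 'mesh.e', 'The mesh file to load')
    params.addParam('verbose', False, 'Print extra output')
    extra = InputParameters()
    extra.addParam('tol', 1e-8, 'Convergence tolerance')
    params += extra
    params.printParams()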
|
harterj/moose
|
python/FactorySystem/InputParameters.py
|
Python
|
lgpl-2.1
| 4,973
|
[
"MOOSE"
] |
0a14a6890090e016fdeb80b3ccbc3cfad51ea1f86179ed3748d72b56c309ca4f
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Visualize a simulation with a pool of particles with various charges,
LJ parameters and masses.
"""
import espressomd
import espressomd.electrostatics
import espressomd.visualization_opengl
import numpy as np
required_features = ["P3M", "LENNARD_JONES", "MASS"]
espressomd.assert_features(required_features)
box = [40, 40, 40]
system = espressomd.System(box_l=box)
system.cell_system.set_regular_decomposition(use_verlet_lists=True)
visualizer = espressomd.visualization_opengl.openGLLive(
system, background_color=[1, 1, 1], drag_enabled=True, drag_force=10)
# TIMESTEP
time_step_fs = 1.0
system.time_step = time_step_fs * 1.0e-2
system.cell_system.skin = 1.2
# TEMPERATURE
SI_temperature = 400.0
kb_kjmol = 0.0083145
temperature = SI_temperature * kb_kjmol
# COULOMB PREFACTOR (elementary charge)^2 / (4*pi*epsilon) in Angstrom*kJ/mol
epsilon_r = 4.0 # dimensionless
epsilon_0 = 8.8541878128e-12 # units of [C^2/J/m]
q_e = 1.602176634e-19 # units of [C]
avogadro = 6.022e23 # units of [mol]
prefactor = q_e**2 / (4 * np.pi * epsilon_r * epsilon_0) # units of [J.m]
# convert energies to kJ/mol, with distances in Angstroms
coulomb_prefactor = prefactor * avogadro / 1000 * 1e10
# FORCE FIELDS
# distances in Angstroms, epsilons in kBT, masses in g/mol
species = ["Cl", "Na", "Colloid", "Solvent"]
types = {"Cl": 0, "Na": 1, "Colloid": 2, "Solvent": 3}
charges = {"Cl": -1.0, "Na": 1.0, "Colloid": -3.0, "Solvent": 0.0}
lj_sigmas = {"Cl": 3.85, "Na": 2.52, "Colloid": 10.0, "Solvent": 1.5}
lj_epsilons = {"Cl": 192.45, "Na": 17.44,
"Colloid": 100.0, "Solvent": 50.0}
lj_cuts = {"Cl": 2.0 * lj_sigmas["Cl"], "Na": 2.0 * lj_sigmas["Na"],
"Colloid": 1.5 * lj_sigmas["Colloid"],
"Solvent": 2.0 * lj_sigmas["Solvent"]}
masses = {"Cl": 35.453, "Na": 22.99, "Colloid": 300, "Solvent": 18.0}
n_ionpairs = 50
for i in range(n_ionpairs):
for t in ["Na", "Cl"]:
system.part.add(pos=box * np.random.random(3),
q=charges[t], type=types[t], mass=masses[t])
n_colloids = 30
t = "Colloid"
t_co = "Na"
for i in range(n_colloids):
system.part.add(pos=box * np.random.random(3),
q=charges[t], type=types[t], mass=masses[t])
for i in range(int(abs(charges[t]))):
system.part.add(pos=box * np.random.random(3),
q=charges[t_co], type=types[t_co], mass=masses[t_co])
n_solvents = 800
t = "Solvent"
for i in range(n_solvents):
system.part.add(pos=box * np.random.random(3),
q=charges[t], type=types[t], mass=masses[t])
def combination_rule_epsilon(rule, eps1, eps2):
if rule == "Lorentz":
return (eps1 * eps2)**0.5
else:
        raise ValueError("No combination rule defined")
def combination_rule_sigma(rule, sig1, sig2):
if rule == "Berthelot":
return (sig1 + sig2) * 0.5
else:
        raise ValueError("No combination rule defined")
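# A hedged aside: despite the swapped rule names, the two helpers above
# implement the standard Lorentz-Berthelot prescription (arithmetic mean for
# sigma, geometric mean for epsilon). For the Na-Cl pair this gives
# sigma = (2.52 + 3.85) / 2 = 3.185 and epsilon = sqrt(17.44 * 192.45) ~ 57.9.
assert abs(combination_rule_sigma("Berthelot", lj_sigmas["Na"],
                                  lj_sigmas["Cl"]) - 3.185) < 1e-12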
# Lennard-Jones interactions parameters
for i in range(len(species)):
for j in range(i, len(species)):
s = [species[i], species[j]]
lj_sig = combination_rule_sigma(
"Berthelot", lj_sigmas[s[0]], lj_sigmas[s[1]])
lj_cut = combination_rule_sigma(
"Berthelot", lj_cuts[s[0]], lj_cuts[s[1]])
lj_eps = combination_rule_epsilon(
"Lorentz", lj_epsilons[s[0]], lj_epsilons[s[1]])
system.non_bonded_inter[types[s[0]], types[s[1]]].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, shift="auto")
energy = system.analysis.energy()
print(f"Before Minimization: E_total = {energy['total']:.2e}")
system.integrator.set_steepest_descent(f_max=1000, gamma=30.0,
max_displacement=0.01)
system.integrator.run(1000)
system.integrator.set_vv()
energy = system.analysis.energy()
print(f"After Minimization: E_total = {energy['total']:.2e}")
print("Tune p3m")
p3m = espressomd.electrostatics.P3M(prefactor=coulomb_prefactor, accuracy=1e-1)
system.actors.add(p3m)
system.thermostat.set_langevin(kT=temperature, gamma=2.0, seed=42)
visualizer.run(1)
|
espressomd/espresso
|
samples/visualization_charged.py
|
Python
|
gpl-3.0
| 4,827
|
[
"Avogadro",
"ESPResSo"
] |
a945d4a79a85202bc3456e06676abfa320b2bfb4be0dcde636f626c43f26da4f
|
##
# Copyright 2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing netcdf4-python, implemented as an easyblock.
@author: Kenneth Hoste (Ghent University)
"""
import os
import easybuild.tools.environment as env
from easybuild.easyblocks.generic.pythonpackage import PythonPackage
from easybuild.tools.modules import get_software_root
class EB_netcdf4_minus_python(PythonPackage):
"""Support for building and installing netcdf4-python"""
def __init__(self, *args, **kwargs):
"""Custom constructor for netcdf4-python."""
super(EB_netcdf4_minus_python, self).__init__(*args, **kwargs)
self.options['modulename'] = 'netCDF4'
def configure_step(self):
"""
Configure and
Test if python module is loaded
"""
hdf5 = get_software_root('HDF5')
if hdf5:
env.setvar('HDF5_DIR', hdf5)
szip = get_software_root('Szip')
if szip:
env.setvar('SZIP_DIR', szip)
netcdf = get_software_root('netCDF')
if netcdf:
env.setvar('NETCDF4_DIR', netcdf)
super(EB_netcdf4_minus_python, self).configure_step()
def test_step(self):
"""Run netcdf4-python tests."""
self.testinstall = True
cwd = os.getcwd()
        # ignoring tests known to fail: tst_dap.py, tst_diskless.py
        self.testcmd = "cd %s/test && mv tst_dap.py notst_dap.py && mv tst_diskless.py notst_diskless.py && python run_all.py && cd %s" % (self.cfg['start_dir'], cwd)
super(EB_netcdf4_minus_python, self).test_step()
def sanity_check_step(self):
"""Custom sanity check for netcdf4-python"""
custom_paths = {
'files': ['bin/nc3tonc4', 'bin/nc4tonc3', 'bin/ncinfo'],
'dirs': [os.path.join(self.pylibdir, 'netCDF4-1.1.8-py2.7-linux-x86_64.egg')],
}
return super(EB_netcdf4_minus_python, self).sanity_check_step(custom_paths=custom_paths)
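# A hedged note (not part of the easyblock itself): configure_step above
# passes dependency locations to netcdf4-python's setup.py purely through
# environment variables, roughly equivalent to the shell sketch below
# (paths are illustrative only):
#
#   HDF5_DIR=/apps/HDF5 SZIP_DIR=/apps/Szip NETCDF4_DIR=/apps/netCDF \
#       python setup.py build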
|
eth-cscs/production
|
easybuild/easyblocks/netcdf4_python.py
|
Python
|
gpl-3.0
| 2,978
|
[
"NetCDF"
] |
345174de07b336466e114d8fb94e758415d818184f54737c846daadfdd87849c
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`http` module enables OpenLP to retrieve scripture from bible websites.
"""
import os
import logging
import re
import socket
import urllib.request, urllib.parse, urllib.error
from html.parser import HTMLParseError
from bs4 import BeautifulSoup, NavigableString, Tag
from openlp.core.lib import Registry, translate
from openlp.core.lib.ui import critical_error_message_box
from openlp.core.utils import get_web_page
from openlp.plugins.bibles.lib import SearchResults
from openlp.plugins.bibles.lib.db import BibleDB, BiblesResourcesDB, Book
CLEANER_REGEX = re.compile(r' |<br />|\'\+\'')
FIX_PUNKCTUATION_REGEX = re.compile(r'[ ]+([.,;])')
REDUCE_SPACES_REGEX = re.compile(r'[ ]{2,}')
UGLY_CHARS = {
'\u2014': ' - ',
'\u2018': '\'',
'\u2019': '\'',
'\u201c': '"',
'\u201d': '"',
' ': ' '
}
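# Bibleserver encodes book/chapter/verse in each verse's CSS class as three digit
# groups, e.g. a class such as 'v1001001 verse...' (illustrative example); the regex
# captures those groups so that sub(r'\3', ...) keeps only the verse number digits.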
VERSE_NUMBER_REGEX = re.compile(r'v(\d{1,2})(\d{3})(\d{3}) verse.*')
log = logging.getLogger(__name__)
class BGExtract(object):
"""
Extract verses from BibleGateway
"""
def __init__(self, proxy_url=None):
log.debug('BGExtract.init("%s")', proxy_url)
self.proxy_url = proxy_url
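        # fail fast on unresponsive bible sites rather than blocking indefinitely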
socket.setdefaulttimeout(30)
def _remove_elements(self, parent, tag, class_=None):
"""
Remove a particular element from the BeautifulSoup tree.
``parent``
The element from which items need to be removed.
``tag``
            A string of the tag type, e.g. "div"
``class_``
An HTML class attribute for further qualification.
"""
if class_:
all_tags = parent.find_all(tag, class_)
else:
all_tags = parent.find_all(tag)
for element in all_tags:
element.extract()
def _extract_verse(self, tag):
"""
Extract a verse (or part of a verse) from a tag.
``tag``
The BeautifulSoup Tag element with the stuff we want.
"""
        if isinstance(tag, NavigableString):
            return None, str(tag)
        # BeautifulSoup 4 returns the class attribute as a list of class names (or
        # None when absent), so test membership rather than the first list entry.
        tag_classes = tag.get('class') or []
        if 'versenum' in tag_classes:
            verse = str(tag.string).replace('[', '').replace(']', '').strip()
            return verse, None
        elif 'chapternum' in tag_classes:
            verse = '1'
            return verse, None
else:
verse = None
text = ''
for child in tag.contents:
c_verse, c_text = self._extract_verse(child)
if c_verse:
verse = c_verse
if text and c_text:
text += c_text
elif c_text is not None:
text = c_text
return verse, text
def _clean_soup(self, tag):
"""
Remove all the rubbish from the HTML page.
``tag``
The base tag within which we want to remove stuff.
"""
self._remove_elements(tag, 'sup', 'crossreference')
self._remove_elements(tag, 'sup', 'footnote')
self._remove_elements(tag, 'div', 'footnotes')
self._remove_elements(tag, 'div', 'crossrefs')
self._remove_elements(tag, 'h3')
self._remove_elements(tag, 'h4')
self._remove_elements(tag, 'h5')
def _extract_verses(self, tags):
"""
Extract all the verses from a pre-prepared list of HTML tags.
``tags``
A list of BeautifulSoup Tag elements.
"""
verses = []
tags = tags[::-1]
current_text = ''
for tag in tags:
verse = None
text = ''
for child in tag.contents:
c_verse, c_text = self._extract_verse(child)
if c_verse:
verse = c_verse
if text and c_text:
text += c_text
elif c_text is not None:
text = c_text
if not verse:
current_text = text + ' ' + current_text
else:
text += ' ' + current_text
current_text = ''
if text:
for old, new in UGLY_CHARS.items():
text = text.replace(old, new)
text = ' '.join(text.split())
if verse and text:
verse = verse.strip()
try:
verse = int(verse)
except ValueError:
verse_parts = verse.split('-')
if len(verse_parts) > 1:
verse = int(verse_parts[0])
except TypeError:
log.warn('Illegal verse number: %s', str(verse))
verses.append((verse, text))
verse_list = {}
for verse, text in verses[::-1]:
verse_list[verse] = text
return verse_list
def _extract_verses_old(self, div):
"""
        Use the old style of parsing for those Bibles on BG which have mysteriously not been migrated to the new
        (still broken) HTML.
``div``
The parent div.
"""
verse_list = {}
# Cater for inconsistent mark up in the first verse of a chapter.
first_verse = div.find('versenum')
if first_verse and first_verse.contents:
verse_list[1] = str(first_verse.contents[0])
for verse in div('sup', 'versenum'):
raw_verse_num = verse.next_element
clean_verse_num = 0
            # Not all verses exist in all translations, and they may or may not be represented by a verse
            # number. If they are not, that is fine; if they are, the number will probably be in a format that
            # breaks int(). We would then have no idea what garbage may be sucked in to the verse text, so if we
            # do not get a clean int() we ignore the verse completely.
try:
clean_verse_num = int(str(raw_verse_num))
except ValueError:
verse_parts = str(raw_verse_num).split('-')
if len(verse_parts) > 1:
clean_verse_num = int(verse_parts[0])
except TypeError:
log.warn('Illegal verse number: %s', str(raw_verse_num))
if clean_verse_num:
verse_text = raw_verse_num.next_element
part = raw_verse_num.next_element.next_element
while not (isinstance(part, Tag) and part.get('class')[0] == 'versenum'):
# While we are still in the same verse grab all the text.
if isinstance(part, NavigableString):
verse_text += part
if isinstance(part.next_element, Tag) and part.next_element.name == 'div':
# Run out of verses so stop.
break
part = part.next_element
verse_list[clean_verse_num] = str(verse_text)
return verse_list
def get_bible_chapter(self, version, book_name, chapter):
"""
Access and decode Bibles via the BibleGateway website.
``version``
            The version of the Bible, e.g. 31 for the New International Version.
``book_name``
Name of the Book.
``chapter``
Chapter number.
"""
log.debug('BGExtract.get_bible_chapter("%s", "%s", "%s")', version, book_name, chapter)
url_book_name = urllib.parse.quote(book_name.encode("utf-8"))
url_params = 'search=%s+%s&version=%s' % (url_book_name, chapter, version)
soup = get_soup_for_bible_ref(
'http://www.biblegateway.com/passage/?%s' % url_params,
pre_parse_regex=r'<meta name.*?/>', pre_parse_substitute='')
if not soup:
return None
div = soup.find('div', 'result-text-style-normal')
self._clean_soup(div)
span_list = div.find_all('span', 'text')
log.debug('Span list: %s', span_list)
if not span_list:
# If we don't get any spans then we must have the old HTML format
verse_list = self._extract_verses_old(div)
else:
verse_list = self._extract_verses(span_list)
if not verse_list:
log.debug('No content found in the BibleGateway response.')
send_error_message('parse')
return None
return SearchResults(book_name, chapter, verse_list)
def get_books_from_http(self, version):
"""
        Load a list of all books a Bible contains from the BibleGateway website.
``version``
The version of the Bible like NIV for New International Version
"""
log.debug('BGExtract.get_books_from_http("%s")', version)
url_params = urllib.parse.urlencode({'action': 'getVersionInfo', 'vid': '%s' % version})
reference_url = 'http://www.biblegateway.com/versions/?%s#books' % url_params
page = get_web_page(reference_url)
if not page:
send_error_message('download')
return None
page_source = page.read()
try:
page_source = str(page_source, 'utf8')
except UnicodeDecodeError:
page_source = str(page_source, 'cp1251')
try:
soup = BeautifulSoup(page_source)
except HTMLParseError:
log.error('BeautifulSoup could not parse the Bible page.')
send_error_message('parse')
return None
if not soup:
send_error_message('parse')
return None
self.application.process_events()
content = soup.find('table', 'infotable')
if content:
content = content.find_all('tr')
if not content:
log.error('No books found in the Biblegateway response.')
send_error_message('parse')
return None
books = []
for book in content:
book = book.find('td')
if book:
books.append(book.contents[0])
return books
def _get_application(self):
"""
        Adds the application object to the class dynamically.
Windows needs to access the application in a dynamic manner.
"""
if os.name == 'nt':
return Registry().get('application')
else:
if not hasattr(self, '_application'):
self._application = Registry().get('application')
return self._application
application = property(_get_application)
class BSExtract(object):
"""
Extract verses from Bibleserver.com
"""
def __init__(self, proxy_url=None):
log.debug('BSExtract.init("%s")', proxy_url)
self.proxy_url = proxy_url
socket.setdefaulttimeout(30)
def get_bible_chapter(self, version, book_name, chapter):
"""
Access and decode bibles via Bibleserver mobile website
``version``
The version of the bible like NIV for New International Version
``book_name``
Text name of bible book e.g. Genesis, 1. John, 1John or Offenbarung
``chapter``
Chapter number
"""
log.debug('BSExtract.get_bible_chapter("%s", "%s", "%s")', version, book_name, chapter)
url_version = urllib.parse.quote(version.encode("utf-8"))
url_book_name = urllib.parse.quote(book_name.encode("utf-8"))
chapter_url = 'http://m.bibleserver.com/text/%s/%s%d' % (url_version, url_book_name, chapter)
header = ('Accept-Language', 'en')
soup = get_soup_for_bible_ref(chapter_url, header)
if not soup:
return None
self.application.process_events()
content = soup.find('div', 'content')
if not content:
log.error('No verses found in the Bibleserver response.')
send_error_message('parse')
return None
content = content.find('div').find_all('div')
verses = {}
for verse in content:
self.application.process_events()
versenumber = int(VERSE_NUMBER_REGEX.sub(r'\3', ' '.join(verse['class'])))
verses[versenumber] = verse.contents[1].rstrip('\n')
return SearchResults(book_name, chapter, verses)
def get_books_from_http(self, version):
"""
Load a list of all books a Bible contains from Bibleserver mobile website.
``version``
The version of the Bible like NIV for New International Version
"""
log.debug('BSExtract.get_books_from_http("%s")', version)
url_version = urllib.parse.quote(version.encode("utf-8"))
chapter_url = 'http://m.bibleserver.com/overlay/selectBook?translation=%s' % (url_version)
soup = get_soup_for_bible_ref(chapter_url)
if not soup:
return None
content = soup.find('ul')
if not content:
log.error('No books found in the Bibleserver response.')
send_error_message('parse')
return None
content = content.find_all('li')
return [book.contents[0].contents[0] for book in content]
def _get_application(self):
"""
        Adds the application object to the class dynamically.
Windows needs to access the application in a dynamic manner.
"""
if os.name == 'nt':
return Registry().get('application')
else:
if not hasattr(self, '_application'):
self._application = Registry().get('application')
return self._application
application = property(_get_application)
class CWExtract(object):
"""
Extract verses from CrossWalk/BibleStudyTools
"""
def __init__(self, proxy_url=None):
log.debug('CWExtract.init("%s")', proxy_url)
self.proxy_url = proxy_url
socket.setdefaulttimeout(30)
def get_bible_chapter(self, version, book_name, chapter):
"""
Access and decode bibles via the Crosswalk website
``version``
The version of the Bible like niv for New International Version
``book_name``
            Text name of the book in English, e.g. 'gen' for Genesis
``chapter``
Chapter number
"""
log.debug('CWExtract.get_bible_chapter("%s", "%s", "%s")', version, book_name, chapter)
url_book_name = book_name.replace(' ', '-')
url_book_name = url_book_name.lower()
url_book_name = urllib.parse.quote(url_book_name.encode("utf-8"))
chapter_url = 'http://www.biblestudytools.com/%s/%s/%s.html' % (version, url_book_name, chapter)
soup = get_soup_for_bible_ref(chapter_url)
if not soup:
return None
self.application.process_events()
html_verses = soup.find_all('span', 'versetext')
if not html_verses:
log.error('No verses found in the CrossWalk response.')
send_error_message('parse')
return None
verses = {}
for verse in html_verses:
self.application.process_events()
verse_number = int(verse.contents[0].contents[0])
verse_text = ''
for part in verse.contents:
self.application.process_events()
if isinstance(part, NavigableString):
verse_text += part
                elif isinstance(part, Tag) and part.get('class') and \
                        ('WordsOfChrist' in part['class'] or 'strongs' in part['class']):
for subpart in part.contents:
self.application.process_events()
if isinstance(subpart, NavigableString):
verse_text += subpart
                        elif isinstance(subpart, Tag) and subpart.get('class') and 'strongs' in subpart['class']:
for subsub in subpart.contents:
self.application.process_events()
if isinstance(subsub, NavigableString):
verse_text += subsub
self.application.process_events()
# Fix up leading and trailing spaces, multiple spaces, and spaces between text and , and .
verse_text = verse_text.strip('\n\r\t ')
verse_text = REDUCE_SPACES_REGEX.sub(' ', verse_text)
verse_text = FIX_PUNKCTUATION_REGEX.sub(r'\1', verse_text)
verses[verse_number] = verse_text
return SearchResults(book_name, chapter, verses)
def get_books_from_http(self, version):
"""
        Load a list of all books a Bible contains from the Crosswalk website.
``version``
The version of the bible like NIV for New International Version
"""
log.debug('CWExtract.get_books_from_http("%s")', version)
chapter_url = 'http://www.biblestudytools.com/%s/' % (version)
soup = get_soup_for_bible_ref(chapter_url)
if not soup:
return None
content = soup.find('div', {'class': 'Body'})
content = content.find('ul', {'class': 'parent'})
if not content:
log.error('No books found in the Crosswalk response.')
send_error_message('parse')
return None
content = content.find_all('li')
books = []
for book in content:
book = book.find('a')
books.append(book.contents[0])
return books
def _get_application(self):
"""
        Adds the application object to the class dynamically.
Windows needs to access the application in a dynamic manner.
"""
if os.name == 'nt':
return Registry().get('application')
else:
if not hasattr(self, '_application'):
self._application = Registry().get('application')
return self._application
application = property(_get_application)
class HTTPBible(BibleDB):
log.info('%s HTTPBible loaded', __name__)
def __init__(self, parent, **kwargs):
"""
Finds all the bibles defined for the system. Creates an Interface Object for each bible containing connection
information.
Throws Exception if no Bibles are found.
Init confirms the bible exists and stores the database path.
"""
BibleDB.__init__(self, parent, **kwargs)
self.download_source = kwargs['download_source']
self.download_name = kwargs['download_name']
# TODO: Clean up proxy stuff. We probably want one global proxy per connection type (HTTP and HTTPS) at most.
self.proxy_server = None
self.proxy_username = None
self.proxy_password = None
if 'path' in kwargs:
self.path = kwargs['path']
if 'proxy_server' in kwargs:
self.proxy_server = kwargs['proxy_server']
if 'proxy_username' in kwargs:
self.proxy_username = kwargs['proxy_username']
if 'proxy_password' in kwargs:
self.proxy_password = kwargs['proxy_password']
def do_import(self, bible_name=None):
"""
Run the import. This method overrides the parent class method. Returns ``True`` on success, ``False`` on
failure.
"""
self.wizard.progress_bar.setMaximum(68)
self.wizard.increment_progress_bar(translate('BiblesPlugin.HTTPBible', 'Registering Bible and loading books...'))
self.save_meta('download_source', self.download_source)
self.save_meta('download_name', self.download_name)
if self.proxy_server:
self.save_meta('proxy_server', self.proxy_server)
if self.proxy_username:
# Store the proxy userid.
self.save_meta('proxy_username', self.proxy_username)
if self.proxy_password:
# Store the proxy password.
self.save_meta('proxy_password', self.proxy_password)
if self.download_source.lower() == 'crosswalk':
handler = CWExtract(self.proxy_server)
elif self.download_source.lower() == 'biblegateway':
handler = BGExtract(self.proxy_server)
elif self.download_source.lower() == 'bibleserver':
handler = BSExtract(self.proxy_server)
books = handler.get_books_from_http(self.download_name)
if not books:
log.exception('Importing books from %s - download name: "%s" '\
'failed' % (self.download_source, self.download_name))
return False
self.wizard.progress_bar.setMaximum(len(books) + 2)
self.wizard.increment_progress_bar(translate( 'BiblesPlugin.HTTPBible', 'Registering Language...'))
bible = BiblesResourcesDB.get_webbible(self.download_name, self.download_source.lower())
if bible['language_id']:
language_id = bible['language_id']
self.save_meta('language_id', language_id)
else:
language_id = self.get_language(bible_name)
if not language_id:
log.exception('Importing books from %s failed' % self.filename)
return False
for book in books:
if self.stop_import_flag:
break
self.wizard.increment_progress_bar(translate(
'BiblesPlugin.HTTPBible', 'Importing %s...', 'Importing <book name>...') % book)
book_ref_id = self.get_book_ref_id_by_name(book, len(books), language_id)
if not book_ref_id:
log.exception('Importing books from %s - download name: "%s" '\
'failed' % (self.download_source, self.download_name))
return False
book_details = BiblesResourcesDB.get_book_by_id(book_ref_id)
log.debug('Book details: Name:%s; id:%s; testament_id:%s',
book, book_ref_id, book_details['testament_id'])
self.create_book(book, book_ref_id, book_details['testament_id'])
if self.stop_import_flag:
return False
else:
return True
def get_verses(self, reference_list, show_error=True):
"""
A reimplementation of the ``BibleDB.get_verses`` method, this one is specifically for web Bibles. It first
checks to see if the particular chapter exists in the DB, and if not it pulls it from the web. If the chapter
DOES exist, it simply pulls the verses from the DB using the ancestor method.
``reference_list``
This is the list of references the media manager item wants. It is a list of tuples, with the following
format::
(book_reference_id, chapter, start_verse, end_verse)
Therefore, when you are looking for multiple items, simply break them up into references like this, bundle
them into a list. This function then runs through the list, and returns an amalgamated list of ``Verse``
objects. For example::
[(u'35', 1, 1, 1), (u'35', 2, 2, 3)]
"""
log.debug('HTTPBible.get_verses("%s")', reference_list)
for reference in reference_list:
book_id = reference[0]
db_book = self.get_book_by_book_ref_id(book_id)
if not db_book:
if show_error:
critical_error_message_box(
translate('BiblesPlugin', 'No Book Found'),
translate('BiblesPlugin', 'No matching book could be found in this Bible. Check that you have '
'spelled the name of the book correctly.'))
return []
book = db_book.name
if BibleDB.get_verse_count(self, book_id, reference[1]) == 0:
self.application.set_busy_cursor()
search_results = self.get_chapter(book, reference[1])
if search_results and search_results.has_verse_list():
                    ## We have found a book of the Bible. By reusing the
                    ## returned book name we get the correct book; for example,
                    ## it is possible to request "ac" and get "Acts" back.
book_name = search_results.book
self.application.process_events()
# Check to see if book/chapter exists.
db_book = self.get_book(book_name)
self.create_chapter(db_book.id, search_results.chapter, search_results.verse_list)
self.application.process_events()
self.application.set_normal_cursor()
self.application.process_events()
return BibleDB.get_verses(self, reference_list, show_error)
def get_chapter(self, book, chapter):
"""
Receive the request and call the relevant handler methods.
"""
log.debug('HTTPBible.get_chapter("%s", "%s")', book, chapter)
log.debug('source = %s', self.download_source)
if self.download_source.lower() == 'crosswalk':
handler = CWExtract(self.proxy_server)
elif self.download_source.lower() == 'biblegateway':
handler = BGExtract(self.proxy_server)
elif self.download_source.lower() == 'bibleserver':
handler = BSExtract(self.proxy_server)
return handler.get_bible_chapter(self.download_name, book, chapter)
def get_books(self):
"""
Return the list of books.
"""
log.debug('HTTPBible.get_books("%s")', Book.name)
return self.get_all_objects(Book, order_by_ref=Book.id)
def get_chapter_count(self, book):
"""
Return the number of chapters in a particular book.
``book``
The book object to get the chapter count for.
"""
log.debug('HTTPBible.get_chapter_count("%s")', book.name)
return BiblesResourcesDB.get_chapter_count(book.book_reference_id)
def get_verse_count(self, book_id, chapter):
"""
Return the number of verses for the specified chapter and book.
        ``book_id``
            The id of the book.
``chapter``
The chapter whose verses are being counted.
"""
log.debug('HTTPBible.get_verse_count("%s", %s)', book_id, chapter)
return BiblesResourcesDB.get_verse_count(book_id, chapter)
def _get_application(self):
"""
        Adds the application object to the class dynamically.
Windows needs to access the application in a dynamic manner.
"""
if os.name == 'nt':
return Registry().get('application')
else:
if not hasattr(self, '_application'):
self._application = Registry().get('application')
return self._application
application = property(_get_application)
def get_soup_for_bible_ref(reference_url, header=None, pre_parse_regex=None, pre_parse_substitute=None):
"""
Gets a webpage and returns a parsed and optionally cleaned soup or None.
``reference_url``
The URL to obtain the soup from.
``header``
An optional HTTP header to pass to the bible web server.
``pre_parse_regex``
A regular expression to run on the webpage. Allows manipulation of the webpage before passing to BeautifulSoup
for parsing.
``pre_parse_substitute``
The text to replace any matches to the regular expression with.
"""
if not reference_url:
return None
page = get_web_page(reference_url, header, True)
if not page:
send_error_message('download')
return None
page_source = page.read()
if pre_parse_regex and pre_parse_substitute is not None:
page_source = re.sub(pre_parse_regex, pre_parse_substitute, page_source.decode())
soup = None
try:
soup = BeautifulSoup(page_source)
CLEANER_REGEX.sub('', str(soup))
except HTMLParseError:
log.exception('BeautifulSoup could not parse the bible page.')
if not soup:
send_error_message('parse')
return None
Registry().get('application').process_events()
return soup
def send_error_message(error_type):
"""
Send a standard error message informing the user of an issue.
``error_type``
        The type of error that occurred for the issue.
"""
if error_type == 'download':
critical_error_message_box(
translate('BiblesPlugin.HTTPBible', 'Download Error'),
translate('BiblesPlugin.HTTPBible', 'There was a problem downloading your verse selection. Please check '
'your Internet connection, and if this error continues to occur please consider reporting a bug.'))
elif error_type == 'parse':
critical_error_message_box(
translate('BiblesPlugin.HTTPBible', 'Parse Error'),
translate('BiblesPlugin.HTTPBible', 'There was a problem extracting your verse selection. If this error '
'continues to occur please consider reporting a bug.'))
|
marmyshev/bug_1117098
|
openlp/plugins/bibles/lib/http.py
|
Python
|
gpl-2.0
| 31,062
|
[
"Brian"
] |
a93feca0a5432b01149753a4e8449dee8cd4944f82b2b0d95ccd7e36a9e29ad6
|
#!/usr/bin/env python
'''
GOAL:
- this code contains all of the code to make figures for paper1
REQUIRED MODULES
- LCSbase.py
'''
###########################
###### IMPORT MODULES
###########################
import LCSbase as lb
from matplotlib import pyplot as plt
import numpy as np
import os
from LCScommon_py3 import *
from astropy.io import fits
from astropy.cosmology import WMAP9 as cosmo
import argparse  # here is min mass = 9.75
###########################
##### SET UP ARGPARSE
###########################
parser = argparse.ArgumentParser(description ='Make the figures for LCS paper 1')
parser.add_argument('--minmass', dest = 'minmass', default = 9., help = 'minimum stellar mass for sample. default is log10(M*) > 9.0')
parser.add_argument('--diskonly', dest = 'diskonly', default = 1, help = 'True/False (enter 1 or 0). normalize by Simard+11 disk size rather than Re for single-component sersic fit. Default is true. ')
args = parser.parse_args()
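# example invocation (values are illustrative):
#   python LCSpaper1.py --minmass 9.5 --diskonly 1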
###########################
##### DEFINITIONS
###########################
USE_DISK_ONLY = bool(float(args.diskonly))  # set to use disk effective radius to normalize 24um size
if USE_DISK_ONLY:
print('normalizing by radius of disk')
minsize_kpc=1.3 # one mips pixel at distance of hercules
#minsize_kpc=2*minsize_kpc
mstarmin=float(args.minmass)
mstarmax=10.8
minmass=mstarmin #log of M*
ssfrmin=-12.
ssfrmax=-9
spiralcut=0.8
truncation_ratio=0.5
exterior=.68
colors=['k','b','c','g','m','y','r','sienna','0.5']
shapes=['o','*','p','d','s','^','>','<','v']
#colors=['k','b','c','g','m','y','r','sienna','0.5']
truncated=np.array([113107,140175,79360,79394,79551,79545,82185,166185,166687,162832,146659,99508,170903,18236,43796,43817,43821,70634,104038,104181],'i')
# figure setup
plotsize_single=(6.8,5)
plotsize_2panel=(10,5)
params = {'backend': 'pdf',
'axes.labelsize': 24,
'font.size': 20,
'legend.fontsize': 12,
'xtick.labelsize': 14,
'ytick.labelsize': 14,
#'lines.markeredgecolor' : 'k',
#'figure.titlesize': 20,
'mathtext.fontset': 'cm',
'mathtext.rm': 'serif',
'text.usetex': True,
'figure.figsize': plotsize_single}
plt.rcParams.update(params)
figuredir = '/Users/rfinn/Dropbox/Research/MyPapers/LCSpaper1/submit/resubmit4/'
###########################
##### START OF GALAXIES CLASS
###########################
class galaxies(lb.galaxies):
def plotsizedvdr(self,plotsingle=1,reonly=1,onlycoma=0,plotHI=0,plotbadfits=0,lowmass=0,himass=0,cluster=None,plothexbin=True,hexbinmax=40,scalepoint=0,clustername=None,blueflag=False,plotmembcut=True,colormin=.2,colormax=1,colorbydensity=False,plotoman=False,masscut=None,BTcut=None):
# log10(chabrier) = log10(Salpeter) - .25 (SFR estimate)
# log10(chabrier) = log10(diet Salpeter) - 0.1 (Stellar mass estimates)
if plotsingle:
plt.figure(figsize=(10,6))
ax=plt.gca()
plt.subplots_adjust(left=.1,bottom=.15,top=.9,right=.9)
plt.ylabel('$ \Delta v/\sigma $',fontsize=26)
plt.xlabel('$ \Delta R/R_{200} $',fontsize=26)
plt.legend(loc='upper left',numpoints=1)
colors=self.sizeratio
if colorbydensity:
colors=np.log10(self.s.SIGMA_5)
colormin=-1.5
colormax=1.5
cbticks=np.arange(colormin,colormax+.1,.1)
if USE_DISK_ONLY:
clabel=['$R_{24}/R_d$','$R_{iso}(24)/R_{iso}(r)$']
else:
clabel=['$R_e(24)/R_e(r)$','$R_{iso}(24)/R_{iso}(r)$']
cmaps=['jet_r','jet_r']
v1=[0.2,0.]
v2=[1.2,2]
nplot=1
x=(self.s.DR_R200)
y=abs(self.dv)
flag=self.sampleflag #& self.dvflag
if blueflag:
flag=self.bluesampleflag & self.dvflag
if clustername != None:
flag = flag & (self.s.CLUSTER == clustername)
if masscut != None:
flag = flag & (self.logstellarmass < masscut)
if BTcut != None:
flag = flag & (self.gim2d.B_T_r < 0.3)
if cluster != None:
flag = flag & (self.s.CLUSTER == cluster)
hexflag=self.dvflag
if cluster != None:
hexflag = hexflag & (self.s.CLUSTER == cluster)
nofitflag = self.sfsampleflag & ~self.sampleflag & self.dvflag
nofitflag = self.gim2dflag & (self.gim2d.B_T_r < .2) & self.sfsampleflag & ~self.sampleflag & self.dvflag
if cluster != None:
nofitflag = nofitflag & (self.s.CLUSTER == cluster)
if lowmass:
flag = flag & (self.s.CLUSTER_LX < 1.)
hexflag = hexflag & (self.s.CLUSTER_LX < 1.)
nofitflag = nofitflag & (self.s.CLUSTER_LX < 1.)
if himass:
flag = flag & (self.s.CLUSTER_LX > 1.)
hexflag = hexflag & (self.s.CLUSTER_LX > 1.)
nofitflag = nofitflag & (self.s.CLUSTER_LX > 1.)
if onlycoma:
flag = flag & (self.s.CLUSTER == 'Coma')
if plothexbin:
sp=plt.hexbin(x[hexflag],y[hexflag],gridsize=(30,20),alpha=.7,extent=(0,5,0,10),cmap='gray_r',vmin=0,vmax=hexbinmax)
plt.subplots_adjust(bottom=.15,left=.1,right=.95,top=.95,hspace=.02,wspace=.02)
if plotmembcut:
xl=np.array([-.2,1,1])
yl=np.array([3,3,-0.1])
plt.plot(xl,yl,'k-',lw=2)
elif plotoman: # line to identify infall galaxies from Oman+2013
xl=np.arange(0,2,.1)
plt.plot(xl,-4./3.*xl+2,'k-',lw=3)
#plt.plot(xl,-3./1.2*xl+3,'k-',lw=3)
else: # cut from Jaffe+2011
xl=np.array([0.01,1.2])
yl=np.array([1.5,0])
plt.plot(xl,yl,'k-',lw=2)
if reonly:
nplot=1
else:
nplot=2
if scalepoint:
size=(self.ssfrms[flag]+2)*40
else:
size=60
for i in range(nplot):
if not(reonly):
plt.subplot(1,2,nplot)
nplot +=1
if plotbadfits:
plt.scatter(x[nofitflag],y[nofitflag],marker='x',color='k',s=40,edgecolors='k')#markersize=8,mec='r',mfc='None',label='No Fit')
ax=plt.gca()
if colorbydensity:
sp=plt.scatter(x[flag],y[flag],c=colors[flag],s=size,cmap='jet',vmin=colormin,vmax=colormax,edgecolors=None,lw=0.)
else:
sp=plt.scatter(x[flag],y[flag],c=colors[flag],s=size,cmap='jet_r',vmin=colormin,vmax=colormax,edgecolors=None,lw=0.)
plt.axis([-.1,4.5,-.1,5])
if masscut != None:
plt.axis([-.1,4.5,-.1,4])
if i > 0:
ax.set_yticklabels(([]))
ax.tick_params(axis='both', which='major', labelsize=16)
if plotsingle:
cb=plt.colorbar(sp,fraction=0.08,label=clabel[i],ticks=cbticks)#cax=axins1,ticks=cbticks[i])
#text(.95,.9,clabel[i],transform=ax.transAxes,horizontalalignment='right',fontsize=20)
if plotHI:
f=flag & self.HIflag
plt.plot(x[f],y[f],'bs',mfc='None',mec='b',lw=2,markersize=20)
if not(reonly):
ax.text(0,-.1,'$ \Delta R/R_{200} $',fontsize=22,transform=ax.transAxes,horizontalalignment='center')
ax.text(-1.3,.5,'$\Delta v/\sigma_v $',fontsize=22,transform=ax.transAxes,rotation=90,verticalalignment='center')
if lowmass:
figname=homedir+'research/LocalClusters/SamplePlots/sizedvdr-lowLx'
elif himass:
figname=homedir+'research/LocalClusters/SamplePlots/sizedvdr-hiLx'
else:
figname=homedir+'research/LocalClusters/SamplePlots/sizedvdr'
if plotsingle:
if masscut != None:
plt.savefig(figuredir+'sizedvdr-lowmass-lowBT.eps')
plt.savefig(figuredir+'fig4.pdf')
def compare_cluster_exterior(self):
plt.figure(figsize=plotsize_single)
plt.subplots_adjust(bottom=.15,hspace=.4,top=.95)
plt.subplot(2,2,1)
self.compare_single((self.logstellarmass),baseflag=(self.sampleflag & ~self.agnflag),plotsingle=False,xlab='$ log_{10}(M_*/M_\odot) $',plotname='stellarmass')
plt.legend(loc='upper left')
plt.xticks(np.arange(9,12,.5))
plt.xlim(8.9,11.15)
#xlim(mstarmin,mstarmax)
plt.subplot(2,2,2)
self.compare_single(self.gim2d.B_T_r,baseflag=(self.sampleflag & ~self.agnflag),plotsingle=False,xlab='$GIM2D \ B/T $',plotname='BT')
plt.xticks(np.arange(0,1.1,.2))
plt.xlim(-.05,.85)
plt.subplot(2,2,3)
self.compare_single(self.s.ZDIST,baseflag=(self.sampleflag & ~self.agnflag),plotsingle=False,xlab='$ Redshift $',plotname='zdist')
plt.xticks(np.arange(0.02,.055,.01))
plt.xlim(.0146,.045)
plt.subplot(2,2,4)
#self.compare_single(self.s.SERSIC_TH50*self.da,baseflag=(self.sampleflag & ~self.agnflag),plotsingle=False,xlab='$R_e(r) \ (kpc)$',plotname='Rer')
self.compare_single(self.gim2d.Rhlr,baseflag=(self.sampleflag & ~self.agnflag),plotsingle=False,xlab='$R_e(r) \ (kpc)$',plotname='Rer')
#xticks(arange(2,20,2))
#plt.xlim(2,20)
plt.text(-1.5,1,'$Cumulative \ Distribution$',fontsize=22,transform=plt.gca().transAxes,rotation=90,verticalalignment='center')
#plt.savefig(homedir+'research/LocalClusters/SamplePlots/cluster_exterior.png')
#plt.savefig(homedir+'research/LocalClusters/SamplePlots/cluster_exterior.eps')
plt.savefig(figuredir+'fig5.eps')
def compare_single(self,var,baseflag=None,plotsingle=True,xlab=None,plotname=None):
        if baseflag is None:
f1 = self.sampleflag & self.membflag & ~self.agnflag
f2 = self.sampleflag & ~self.membflag &self.dvflag & ~self.agnflag
else:
f1=baseflag & self.sampleflag & self.membflag & ~self.agnflag
f2=baseflag & self.sampleflag & ~self.membflag & ~self.agnflag
xmin=min(var[baseflag])
xmax=max(var[baseflag])
#print 'xmin, xmax = ',xmin,xmax
print('KS test comparing members and exterior')
(D,p)=ks(var[f1],var[f2])
#t=anderson.anderson_ksamp([var[f1],var[f2]])
#print '%%%%%%%%% ANDERSON %%%%%%%%%%%'
#print 'anderson statistic = ',t[0]
#print 'critical values = ',t[1]
#print 'p-value = ',t[2]
if plotsingle:
plt.figure()#figsize=(12,6))
plt.title('Member vs. External ('+self.prefix+')')
subplots_adjust(bottom=.15,left=.15)
plt.xlabel(xlab,fontsize=18)
#plt.ylabel('$Cumulative \ Distribution $',fontsize=20)
plt.legend(loc='lower right')
plt.hist(var[f1],bins=len(var[f1]),cumulative=True,histtype='step',normed=True,label='Core',range=(xmin,xmax),color='k')
#print var[f2]
plt.hist(var[f2],bins=len(var[f2]),cumulative=True,histtype='step',normed=True,label='External',range=(xmin,xmax),color='0.5')
ylim(-.05,1.05)
ax=gca()
text(.9,.25,'$D = %4.2f$'%(D),horizontalalignment='right',transform=ax.transAxes,fontsize=16)
text(.9,.1,'$p = %5.4f$'%(p),horizontalalignment='right',transform=ax.transAxes,fontsize=16)
return D, p
def plotRe24vsRe(self,plotsingle=1,sbcutobs=20.,prefix=None,usemyflag=0,myflag=None,showerr=0,logy=True,fixPA=False, usedr=False,colorflag=True):
#print 'hi'
if plotsingle:
plt.figure(figsize=(10,8))
ax=plt.gca()
plt.xlabel('$ R_e(r) \ (arcsec)$',fontsize=20)
plt.ylabel('$ R_e(24) \ (arcsec) $',fontsize=20)
#legend(loc='upper left',numpoints=1)
if usemyflag:
flag=myflag
else:
flag=self.sampleflag & (self.sb_obs < sbcutobs)
mflag=flag & self.membflag
nfflag = flag & ~self.membflag & self.dvflag
ffflag = flag & ~self.membflag & ~self.dvflag
print('flag = ',sum(mflag),sum(nfflag),sum(ffflag))
x=(self.gim2d.Rhlr)
if USE_DISK_ONLY:
x=self.gim2d.Rd
if fixPA:
y=self.s.fcre1*mipspixelscale
myerr=self.s.fcre1err*mipspixelscale
else:
y=self.s.fcre1*mipspixelscale
myerr=self.s.fcre1err*mipspixelscale
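        # NOTE: y and myerr are redefined below from the SUPER_RE1 columns, so both
        # fixPA branches above are effectively overridden.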
y=self.s.SUPER_RE1*mipspixelscale*self.DA
myerr=self.s.SUPER_RE1ERR*mipspixelscale*self.DA
if plotsingle:
print('not printing errorbars')
else:
plt.errorbar(x[flag],y[flag],yerr=myerr[flag],fmt=None,ecolor='k')
mstarmin=9.3
mstarmax=11
color=self.logstellarmass
cblabel='$log_{10}(M_*/M\odot) $'
v1=mstarmin
v2=mstarmax
colormap=cm.jet
if usedr:
color=np.log10(sqrt(self.s.DR_R200**2 + self.s.DELTA_V**2))
cblabel='$\Delta r/R_{200}$'
cblabel='$log_{10}(\sqrt(\Delta r/R_{200}^2 + \Delta v/\sigma^2)$'
v1=-.5
v2=.7
colormap=cm.jet_r
if colorflag:
plotcolors = ['r','b']
else:
plotcolors = ['k','0.5']
plt.plot(x[mflag ],y[mflag],'ko',color=plotcolors[0],markersize=8,mec='k')
plt.plot(x[nfflag ],y[nfflag],'ks',color=plotcolors[1],markersize=8,mec='k')
plt.plot(x[ffflag ],y[ffflag],'ks',color=plotcolors[1],markersize=8,mec='k')
uflag = flag & self.upperlimit
print('number of upper limits = ',sum(uflag))
uplimits=np.array(list(zip(ones(sum(uflag)), zeros(sum(uflag)))))
plt.errorbar(x[uflag],y[uflag],yerr=uplimits.T, lolims=True, fmt='*',ecolor='k',color='k',markersize=12)
if plotsingle:
plt.colorbar(sp)
self.addlines(logflag=logy)
ax=plt.gca()
plt.axis([.5,12,-.5,7.3])
def addlines(self,logflag=True):
xl=np.arange(0,100,.5)
plt.plot(xl,xl,'k-')
if logflag:
ax=plt.gca()
ax.set_yscale('log')
ax.set_xscale('log')
plt.axis([1,30.,1,30.])
def plotsizehist(self, btcut = None,colorflag=True):
figure(figsize=(6,6))
plt.subplots_adjust(left=.15,bottom=.2,hspace=.1)
axes=[]
plt.subplot(2,1,1)
axes.append(plt.gca())
mybins=np.arange(0,2,.15)
if btcut == None:
flag = self.sampleflag
else:
flag = self.sampleflag & (self.gim2d.B_T_r < btcut)
if colorflag:
colors = ['r','b']
else:
colors = ['k','k']
flags = [flag & self.membflag & ~self.agnflag,flag & ~self.membflag & ~self.agnflag]
labels = ['$Core$','$External$']
for i in range(len(colors)):
plt.subplot(2,1,i+1)
print('median ratio for ',labels[i],' = ',np.median(self.sizeratio[flags[i]]))
hist(self.sizeratio[flags[i]],bins=mybins,histtype='stepfilled',color=colors[i],label=labels[i],lw=1.5,alpha=1)#,normed=True)
plt.legend(loc='upper right')
plt.axis([0,2,0,22])
if i < 1:
plt.xticks(([]))
plt.text(-.2,1,'$N_{gal}$',transform=gca().transAxes,verticalalignment='center',rotation=90,fontsize=24)
print('comparing cluster and exterior SF galaxies')
ks(self.sizeratio[flag & self.membflag & ~self.agnflag],self.sizeratio[flag & ~self.membflag & ~self.agnflag])
plt.xlabel('$ R_{24}/R_d $')
if btcut == None:
#plt.ylim(0,20)
#plt.savefig(homedir+'research/LocalClusters/SamplePlots/sizehistblue.eps')
#plt.savefig(homedir+'research/LocalClusters/SamplePlots/sizehistblue.png')
plt.savefig(figuredir+'fig11a.eps')
else:
#plt.ylim(0,15)
plt.subplot(2,1,1)
plt.title('$ B/T < %2.1f \ Galaxies $'%(btcut),fontsize=20)
#plt.savefig(homedir+'research/LocalClusters/SamplePlots/sizehistblueBTcut.eps')
#plt.savefig(homedir+'research/LocalClusters/SamplePlots/sizehistblueBTcut.png')
plt.savefig(figuredir+'fig11b.eps')
def plotsize3panel(self,logyscale=False,use_median=True,equal_pop_bins=True):
plt.figure(figsize=(10,10))
plt.subplots_adjust(left=.12,bottom=.1,top=.9,wspace=.02,hspace=.4)
nrow=3
ncol=3
flags=[self.sampleflag, self.sampleflag & self.membflag, self.sampleflag & ~self.membflag]
flags = flags & (self.s.SIGMA_5 > 0.)
x=[self.gim2d.B_T_r,np.log10(self.s.SIGMA_5),self.logstellarmass]
xlabels=['$B/T$','$\log_{10}(\Sigma_5 \ (gal/Mpc^2))$','$\log_{10}(M_\star/M_\odot)$']
colors=[self.logstellarmass,self.gim2d.B_T_r,self.gim2d.B_T_r]
cblabel=['$\log(M_\star/M_\odot)$','$B/T$','$B/T$']
cbticks=[np.arange(8.5,10.8,.4),np.arange(0,1,.2),np.arange(0,1,.2)]
xticklabels=[np.arange(0,1,.2),np.arange(-1.2,2.2,1),np.arange(8.5,11.5,1)]
xlims=[(-.05,.9),(-1.1,1.9),(8.3,11.2)]
v1 = [8.5,0,0]
v2 = [10.8,0.6,0.6]
y=self.sizeratio
yerror=self.sizeratioERR
for i in range(len(x)):
allax=[]
for j in range(3):
                plt.subplot(nrow,ncol,3*i+j+1)
plt.errorbar(x[i][flags[j]],y[flags[j]],yerr=yerror[flags[j]],fmt=None,ecolor='.5',markerfacecolor='white',zorder=1,alpha=.5)
sp=plt.scatter(x[i][flags[j]],y[flags[j]],c=colors[i][flags[j]],vmin=v1[i],vmax=v2[i],cmap='jet',s=40,label='GALFIT',lw=0,alpha=0.7,zorder=1,edgecolors='k')
if j < 3:
(rho,p)=spearman_with_errors(x[i][flags[j]],y[flags[j]],yerror[flags[j]])
ax=plt.gca()
plt.text(.95,.9,r'$\rho = [%4.2f, %4.2f]$'%(np.percentile(rho,16),np.percentile(rho,84)),horizontalalignment='right',transform=ax.transAxes,fontsize=16)
plt.text(.95,.8,'$p = [%5.4f, %5.4f]$'%(np.percentile(p,16),np.percentile(p,84)),horizontalalignment='right',transform=ax.transAxes,fontsize=16)
a=plt.gca()
#plt.axis(limits)
allax.append(a)
if j > 0:
a.set_yticklabels(([]))
if i == 0:
if j == 0:
plt.title('$All $',fontsize=24)
elif j == 1:
plt.title('$Core$',fontsize=24)
elif j == 2:
plt.title('$External$',fontsize=24)
if j == 1:
plt.xlabel(xlabels[i])
if j == 0:
#plt.ylabel('$R_e(24)/Re(r)$')
plt.ylabel('$R_{24}/R_d$')
xbin,ybin,ybinerr, colorbin = binxycolor(x[i][flags[j]],y[flags[j]],colors[i][flags[j]],nbin=5,erry=True,equal_pop_bins=equal_pop_bins,use_median = use_median)
plt.scatter(xbin,ybin,c=colorbin,s=180,vmin=v1[i],vmax=v2[i],cmap='jet',zorder=5,lw=2,edgecolors='k')
plt.errorbar(xbin,ybin,ybinerr,fmt=None,ecolor='k',alpha=0.7)
if logyscale:
a.set_yscale('log')
ylim(.08,6)
else:
ylim(-.1,3.3)
yticks((np.arange(0,4,1)))
xticks(xticklabels[i])
xlim(xlims[i])
#ylim(-.1,2.8)
if j == 2:
c = np.polyfit(xbin,ybin,1)
print('xbin = ', xbin)
print('ybin = ', ybin)
#c = np.polyfit(x[i][flags[j]],y[flags[j]],1)
xl=np.linspace(min(x[i][flags[j]]),max(x[i][flags[j]]),10)
yl = np.polyval(c,xl)
plt.plot(xl,yl,'k--',lw=2)
                    plt.subplot(nrow,ncol,3*i+j)
xl=np.linspace(min(x[i][flags[j-1]]),max(x[i][flags[j-1]]),10)
yl = np.polyval(c,xl)
plt.plot(xl,yl,'k--',lw=2)
#print xbin,ybin,colorbin
#if i == 2:
# #text(0.1,0.9,'$External$',transform=a.transAxes,horizontalalignment='left',fontsize=20)
# text(-2.3,1.7,'$R_e(24)/Re(r)$',transform=a.transAxes,rotation=90,horizontalalignment='center',verticalalignment='center',fontsize=26)
c=colorbar(ax=allax,fraction=.02,ticks=cbticks[i])
c.ax.text(6,.5,cblabel[i],rotation=-90,verticalalignment='center',fontsize=20)
savefig(figuredir+'fig12.pdf')
def plotsizestellarmass(self,plotsingle=True,btmax=None,btmin=None,equal_pop_bins=True,use_median=True):
if plotsingle:
plt.figure(figsize=(7,6))
plt.subplots_adjust(bottom=.15,left=.15)
flags = [self.sampleflag & self.membflag,self.sampleflag & ~self.membflag]
if btmax != None:
flags = flags & (self.gim2d.B_T_r < btmax)
if btmin != None:
flags = flags & (self.gim2d.B_T_r > btmin)
colors = ['r','b']
for i in range(len(flags)):
#plot(self.logstellarmass[flags[i]],self.sizeratio[flags[i]],'ro',color=colors[i],alpha=0.5)
plot(self.logstellarmass[flags[i]],self.sizeratio[flags[i]],'ro',color=colors[i],alpha=0.5)
errorbar(self.logstellarmass[flags[i]],self.sizeratio[flags[i]],self.sizeratioERR[flags[i]],fmt=None,ecolor='0.5',alpha=0.5)
flag = flags[i]
if btmax != None:
flag = flag & (self.logstellarmass > 9.1) & (self.logstellarmass < 10.5)
xbin,ybin,ybinerr,colorbin = binxycolor(self.logstellarmass[flag],self.sizeratio[flag],self.gim2d.B_T_r[flag],erry=True,nbin=5,equal_pop_bins=equal_pop_bins,use_median=use_median)
#print xbin
plot(xbin,ybin,'ro',color=colors[i],markersize=18,mec='k',zorder=5)
#scatter(xbin,ybin,s=200, c=colorbin,marker='^',vmin=0,vmax=0.6,cmap='jet')
errorbar(xbin,ybin,ybinerr,fmt=None,ecolor='k',alpha=0.7)
#colorbar(label='$B/T$')
xlabel('$ \log_{10}(M_\star /M_\odot) $',fontsize=22)
ylabel('$ R_{24}/R_d $',fontsize=22)
#rho,p=spearman(self.logstellarmass[flag],self.sizeratio[flag])
#ax=plt.gca()
#plt.text(.95,.9,r'$\rho = %4.2f$'%(rho),horizontalalignment='right',transform=ax.transAxes,fontsize=18)
#plt.text(.95,.8,'$p = %5.4f$'%(p),horizontalalignment='right',transform=ax.transAxes,fontsize=18)
plt.legend(['$Core$','$<Core>$','$External$','$<External>$'],numpoints=1)
s=''
if btmax != None:
s = '$B/T \ < \ %.2f$'%(btmax)
if btmin != None:
s = '$B/T \ > \ %.2f$'%(btmin)
if (btmax != None) & (btmin != None):
s = '$%.2f < B/T \ < \ %.2f$'%(btmin,btmax)
plt.title(s,fontsize=20)
plt.axis([8.6,10.9,-.1,2.9])
plt.savefig(figuredir+'fig13.pdf')
def plotsizeHIfrac(self,sbcutobs=20.5,isoflag=0,r90flag=0,color_BT=False):
plt.figure(figsize=plotsize_single)
plt.subplots_adjust(bottom=.2,left=.15)
plt.clf()
flag = self.sampleflag & (self.HIflag) #& self.dvflag #& ~self.agnflag
print('number of galaxies = ',sum(flag))
y=(self.sizeratio[flag & self.membflag])
x=np.log10(self.s.HIMASS[flag & self.membflag])-self.logstellarmass[flag & self.membflag]
print('spearman for cluster galaxies only')
t = spearman(x,y)
if color_BT:
pointcolor = self.gim2d.B_T_r
v1=0
v2=0.6
else:
pointcolor = self.logstellarmass
v1=mstarmin
v2=mstarmax
#color=self.logstellarmass[flag]
color=pointcolor[flag & self.membflag]
sp=scatter(x,y,s=90,c=color,vmin=v1,vmax=v2,label='$Core$',cmap='jet',edgecolors='k')
y=(self.sizeratio[flag & ~self.membflag])
x=np.log10(self.s.HIMASS[flag & ~self.membflag])-self.logstellarmass[flag & ~self.membflag]
print('spearman for exterior galaxies only')
t = spearman(x,y)
color=pointcolor[flag & ~self.membflag]
sp=scatter(x,y,s=90,c=color,vmin=v1,vmax=v2,marker='s',label='$External$',cmap='jet',edgecolor='k')
y=(self.sizeratio[flag])
x=np.log10(self.s.HIMASS[flag])-self.logstellarmass[flag]
plt.legend(loc='upper left',scatterpoints=1)
errorbar(x,y,self.sizeratioERR[flag],fmt=None,ecolor='.5',zorder=100)
rho,p=spearman(x,y)
ax=plt.gca()
plt.text(.95,.9,r'$\rho = %4.2f$'%(rho),horizontalalignment='right',transform=ax.transAxes,fontsize=16)
plt.text(.95,.8,'$p = %5.4f$'%(p),horizontalalignment='right',transform=ax.transAxes,fontsize=16)
print('spearman for log(M*) < 10.41')
rho,p=spearman(x[color < 10.41],y[color<10.41])
cb = plt.colorbar(sp,fraction=.08,ticks=np.arange(8.5,11,.5))
cb.ax.text(4.,.5,'$\log(M_\star/M_\odot)$',rotation=-90,verticalalignment='center',fontsize=20)
#plt.ylabel(r'$ R_e(24)/R_e(r)$')
plt.ylabel('$R_{24}/R_d$')
plt.xlabel(r'$ \log_{10}(M_{HI}/M_*)$')
ax.tick_params(axis='both', which='major', labelsize=16)
plt.axis([-1.8,1.6,0,2.5])
plt.savefig(figuredir+'fig16a.eps')
def plotsizeHIdef(self,sbcutobs=20.5,isoflag=0,r90flag=0):
figure(figsize=plotsize_single)
plt.subplots_adjust(left=.15,bottom=.2)
clf()
flag = self.sampleflag & (self.HIflag) #& self.membflag #& self.dvflag
print('number of galaxies = ',sum(flag))
y=(self.sizeratio[flag & self.membflag])
x=(self.s.HIDef[flag & self.membflag])
print('spearman for cluster galaxies only')
t = spearman(x,y)
#color=self.logstellarmass[flag]
#color=self.logstellarmass[flag & s.membflag]
colors=self.logstellarmass
color=colors[flag & self.membflag]
sp=scatter(x,y,s=90,c=color,vmin=mstarmin,vmax=mstarmax,label='$Core$',cmap='jet',edgecolor='k')
y=(self.sizeratio[flag & ~self.membflag])
x=(self.s.HIDef[flag & ~self.membflag])
print('spearman for exterior galaxies only')
t = spearman(x,y)
#color=self.logstellarmass[flag]
color=colors[flag & ~self.membflag]
sp=scatter(x,y,s=90,c=color,vmin=8.5,vmax=10.8,marker='s',label='$External$',cmap='jet',edgecolor='k')
y=(self.sizeratio[flag])
x=(self.s.HIDef[flag])
plt.legend(loc='upper left',scatterpoints=1)
errorbar(x,y,self.sizeratioERR[flag],fmt=None,ecolor='.5',zorder=100)
rho,p=spearman(x,y)
ax=plt.gca()
text(.95,.9,r'$\rho = %4.2f$'%(rho),horizontalalignment='right',transform=ax.transAxes,fontsize=16)
text(.95,.8,'$p = %5.4f$'%(p),horizontalalignment='right',transform=ax.transAxes,fontsize=16)
print('spearman for log(M*) < 10.41')
rho,p=spearman(x[color < 10.41],y[color<10.41])
cb = plt.colorbar(sp,fraction=.08,ticks=np.arange(8.5,11,.5))
cb.ax.text(4.,.5,'$\log(M_\star/M_\odot)$',rotation=-90,verticalalignment='center',fontsize=20)
plt.ylabel('$R_{24}/R_d$')
plt.xlabel('$HI \ Deficiency$')#,fontsize=26)
plt.axis([-.6,1.6,0,2.5])
plt.savefig(figuredir+'fig16b.eps')
def plotNUVrsize(self):
plt.figure(figsize=(10,4))
plt.subplots_adjust(left=.1,wspace=.01,bottom=.2,right=.9)
BTmin = 0
BTmax = 0.4
flags = [self.sampleflag, self.sampleflag & self.membflag,self.sampleflag & ~self.membflag]
labels = ['$All$','$Core$','$External$']
allax=[]
for i in range(3):
plt.subplot(1,3,i+1)
plt.scatter(self.sizeratio[flags[i]],self.NUVr[flags[i]],c=self.gim2d.B_T_r[flags[i]],s=60,cmap='jet',vmin=BTmin,vmax=BTmax,edgecolors='k')
if i == 0:
plt.ylabel('$NUV-r$',fontsize=24)
else:
plt.gca().set_yticklabels(([]))
text(0.98,0.9,labels[i],transform=gca().transAxes,horizontalalignment='right',fontsize=20)
(rho,p)=spearman_with_errors(self.NUVr[flags[i]],self.sizeratio[flags[i]],self.sizeratioERR[flags[i]])
ax=plt.gca()
plt.text(.05,.1,r'$\rho = [%4.2f, %4.2f]$'%(np.percentile(rho,16),np.percentile(rho,84)),horizontalalignment='left',transform=ax.transAxes,fontsize=12)
plt.text(.05,.03,'$p = [%5.4f, %5.4f]$'%(np.percentile(p,16),np.percentile(p,84)),horizontalalignment='left',transform=ax.transAxes,fontsize=12)
plt.axhline(y=4,ls='-',color='0.5')
plt.axhline(y=4.5,ls='--',color='0.5')
plt.axhline(y=3.5,ls='--',color='0.5')
allax.append(plt.gca())
plt.xticks(np.arange(0,4))
plt.axis([-0.3,3.1,0,6.2])
colorlabel='$B/T$'
c=plt.colorbar(ax=allax,fraction=.02,ticks = np.arange(0,.5,.1))
c.ax.text(3.5,.5,colorlabel,rotation=-90,verticalalignment='center',fontsize=20)
plt.text(-.51,-.2,'$R_{24}/R_d $',transform=plt.gca().transAxes,fontsize=24,horizontalalignment='center')
plt.savefig(figuredir+'fig17.eps')
def plotsizevsMclallwhisker(sbcutobs=20,masscut=None,drcut=1.,blueflag=False,usetemp=False,useM500=False,usesigma=False,bwflag=True,btcut=None):
plt.figure(figsize=(10,8))
plt.subplots_adjust(hspace=.02,wspace=.02,bottom=.15,left=.15)
i=0
x1=[]
y1=[]
y2all=[]
y3all=[]
for cl in clusternamesbylx:
flag = (g.s.CLUSTER == cl) & g.sampleflag & g.membflag & ~g.agnflag
if btcut != None:
flag = flag & (g.gim2d.B_T_r < btcut)#& ~s.blueflag
print('number in ',cl,' = ',sum(flag))
if masscut != None:
flag=flag & (g.logstellarmass < masscut)
if usetemp:
x=float(clusterTx[cl])
elif useM500:
x=clusterXray[cl][1] # M500
elif usesigma:
x=log10(clustersigma[cl])
else:
x=log10(clusterLx[cl])+44
y=(g.sizeratio[flag])
y2=(g.size_ratio_corr[flag])
BT=mean(g.gim2d.B_T_r[flag & g.gim2dflag])
erry=std(g.sizeratioERR[flag])/sum(flag)
#plot(x,median(y2),'k.',label='_nolegend_')
if x > -99: #check for temp data, which is negative if not available
print(x, y)
if bwflag:
plt.plot(x,median(y),'k.',color='k',marker=shapes[i],markersize=18,label=cl)
bp = plt.boxplot([y],positions=[x],whis=99)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='black', marker='+')
plt.setp(bp['medians'], color='black')
else:
plt.plot(x,median(y),'k.',color=colors[i],marker=shapes[i],markersize=20,label=cl)
plt.boxplot([y],positions=[x],whis=99)
x1.append(x)
y1.append(median(y))
y2all.append(median(y2))
y3all.append(mean(y2))
#errorbar(x,y,yerr=erry,fmt=None,ecolor=colors[i])
#plot(x,BT,'b^',markersize=15)
i+=1
plt.legend(loc='upper right',numpoints=1,markerscale=.6)
flag = g.sampleflag & ~g.membflag & ~g.agnflag #& ~s.dvflag
exteriorvalue=mean(g.sizeratio[flag])
errexteriorvalue=std(g.sizeratio[flag])/sqrt(1.*sum(flag))
plt.axhline(y=exteriorvalue,color='0.5',ls='-')
plt.axhline(y=exteriorvalue+errexteriorvalue,color='0.5',ls='--')
plt.axhline(y=exteriorvalue-errexteriorvalue,color='0.5',ls='--')
#print 'size corrected by B/A'
#spearman(x1,y2all)
#print y1
#print y2all
#print 'size corrected by B/A, mean'
#spearman(x1,y3all)
ax=plt.gca()
#ax.set_xscale('log')
#xl=arange(41,45,.1)
#yl=-.3*(xl-43.)+.64
#plot(xl,yl,'k--')
if usetemp:
plt.xlabel('$ T_X (kev)$',fontsize = 28)
else:
plt.xlabel('$ log_{10}(L_X \ erg \ s^{-1} )$',fontsize = 28)
plt.ylabel('$R_{24}/R_d$',fontsize = 28)
if usetemp:
plt.xticks(np.arange(0,10.,1))
plt.axis([-.05,10.5,0.,1.2])
ax.tick_params(axis='both', which='major', labelsize=16)
elif useM500:
plt.axis([-.75,5.5,0.,1.2])
ax.tick_params(axis='both', which='major', labelsize=16)
elif usesigma:
#axis([2,3.5,0.,1.2])
ax.tick_params(axis='both', which='major', labelsize=16)
#xticks(arange(2,4,.5))#,['','44','45'])
else:
plt.axis([42.5,45.5,-.1,2.8])
plt.xticks(np.arange(43,46),['43','44','45'])
ax.tick_params(axis='both', which='major', labelsize=16)
plt.savefig(figuredir+'fig14.eps')
def plotsigmaLx(bwflag=True):
plt.figure(figsize=[7,6])
plt.clf()
plt.subplots_adjust(left=.16,bottom=.16,right=.95,top=.95,wspace=.3)
i=0
x=[]
y=[]
errm=[]
errp=[]
for cl in clusternamesbylx:
if bwflag:
plt.plot(clusterLx[cl],clusterbiweightscale[cl],'ko',marker=shapes[i],markersize=18,mfc=None,label=cl)
else:
plt.plot(clusterLx[cl],clusterbiweightscale[cl],'ko',color=colors[i],marker=shapes[i],markersize=16,label=cl)
errm.append(clusterbiweightscale_errm[cl])
errp.append(clusterbiweightscale_errp[cl])
x.append(clusterLx[cl])
y.append(clusterbiweightscale[cl])
i += 1
errm=array(errm)
errp=array(errp)
yerror=array(list(zip(errm, errp)),'f')
#print 'yerror = ',yerror
errorbar(x,y,yerr=yerror.T,fmt=None,ecolor='k')
# plot comparison sample
mah = fits.getdata(homedir+'/github/LCS/tables/Mahdavi2001/systems.fits')
# correct Lx to convert from H0=50 to H0=71 (divide by 1.96)
# convert bolometric luminosity to L in 0.1-2.4 kev band, which is what I use in the figure
# this conversion depends on temperature, ranges from 1.44 - 4.05; using 1.4 as a typical value
# this also brings coma into agreement
plt.plot(10.**(mah.logLXbol-44.)/1.96/1.4,10.**mah.logsigma,'k.',c='0.5',alpha=0.5)
plt.gca().set_xscale('log')
plt.xlabel('$L_X \ (10^{44} \ erg/s)$',fontsize=26)
plt.ylabel('$\sigma \ (km/s) $',fontsize=26)
plt.axis([.04,10,300,1100])
leg=plt.legend(numpoints=1,loc='upper left',scatterpoints=1,markerscale=.6,borderpad=.2,labelspacing=.2,handletextpad=.2,prop={'size':14})
gca().tick_params(axis='both', which='major', labelsize=16)
plt.savefig(figuredir+'fig1.eps')
def plotpositionson24(plotsingle=0,plotcolorbar=1,plotnofit=0,useirsb=0):
plt.figure(figsize=(10,8))
plt.subplots_adjust(hspace=.02,wspace=.02,left=.12,bottom=.12,right=.85)
i=1
allax=[]
for cl in clusternamesbylx:
plt.subplot(3,3,i)
infile=homedir+'/github/LCS/tables/clustertables/'+cl+'_NSAmastertable.fits'
d=fits.getdata(infile)
#print cl, i
ra=g.s.RA-clusterRA[cl]
dec=g.s.DEC-clusterDec[cl]
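        # estimate R200 (in Mpc) from the cluster velocity dispersion and cosmology,
        # then convert it to an angular radius in degrees at the cluster distance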
r200=2.02*(clusterbiweightscale[cl])/1000./sqrt(OmegaL+OmegaM*(1.+clusterz[cl])**3)*H0/70. # in Mpc
r200deg=r200*1000./(cosmo.angular_diameter_distance(clusterbiweightcenter[cl]/3.e5).value*Mpcrad_kpcarcsec)/3600.
cir=Circle((0,0),radius=r200deg,color='None',ec='k')
gca().add_patch(cir)
flag=(g.s.CLUSTER == cl) & g.dvflag
plt.hexbin(d.RA-clusterRA[cl],d.DEC-clusterDec[cl],cmap=cm.Greys,gridsize=40,vmin=0,vmax=10)
if plotnofit:
flag=g.sfsampleflag & ~g.sampleflag & g.dvflag & (g.s.CLUSTER == cl)
plot(ra[flag],dec[flag],'rv',mec='r',mfc='None')
flag=g.sampleflag & g.dvflag & (g.s.CLUSTER == cl)
#print cl, len(ra[flag]),len(dec[flag]),len(s.s.SIZE_RATIO[flag])
if useirsb:
color=log10(g.sigma_ir)
v1=7.6
v2=10.5
colormap=cm.jet
else:
color=g.s.SIZE_RATIO
v1=.1
v2=1
colormap='jet_r'
try:
plt.scatter(ra[flag],dec[flag],s=30,c=color[flag],cmap=colormap,vmin=v1,vmax=v2,edgecolors='k')
except ValueError:
plt.scatter(ra[flag],dec[flag],s=30,c='k',cmap=cm.jet_r,vmin=.1,vmax=1,edgecolors='k')
ax=plt.gca()
fsize=14
t=cluster24Box[cl]
drawbox([t[0]-clusterRA[cl],t[1]-clusterDec[cl],t[2],t[3],t[4]],'g-')
ax=gca()
ax.invert_xaxis()
if plotsingle:
xlabel('$ \Delta RA \ (deg) $',fontsize=22)
ylabel('$ \Delta DEC \ (deg) $',fontsize=22)
legend(numpoints=1,scatterpoints=1)
cname='$'+cl+'$'
text(.1,.8,cname,fontsize=18,transform=ax.transAxes,horizontalalignment='left')
plt.axis([1.8,-1.8,-1.8,1.8])
plt.xticks(np.arange(-1,2,1))
plt.yticks(np.arange(-1,2,1))
allax.append(ax)
multiplotaxes(i)
i+=1
if plotcolorbar:
c=colorbar(ax=allax,fraction=0.05)
c.ax.text(2.2,.5,'$R_{24}/R_d$',rotation=-90,verticalalignment='center',fontsize=20)
plt.text(-.5,-.28,'$\Delta RA \ (deg) $',fontsize=26,horizontalalignment='center',transform=ax.transAxes)
plt.text(-2.4,1.5,'$\Delta Dec \ $',fontsize=26,verticalalignment='center',rotation=90,transform=ax.transAxes,family='serif')
#plt.savefig(homedir+'/research/LocalClusters/SamplePlots/positionson24.eps')
#plt.savefig(homedir+'/research/LocalClusters/SamplePlots/positionson24.png')
plt.savefig(figuredir+'fig3.eps')
def plotRe24vsReall(sbcutobs=20,plotcolorbar=0,fixPA=False,logyflag=False,usedr=False):
figure(figsize=(10,8))
subplots_adjust(hspace=.02,wspace=.02,left=.15,bottom=.15,right=.9,top=.9)
i=1
allax=[]
for cl in clusternamesbylx:
plt.subplot(3,3,i)
flag = (g.s.CLUSTER == cl) & g.sampleflag
g.plotRe24vsRe(plotsingle=0,usemyflag=1,myflag=flag,sbcutobs=sbcutobs,logy=logyflag,fixPA=fixPA,usedr=usedr)
ax=plt.gca()
cname='$'+cl+'$'
plt.text(.9,.8,cname,fontsize=18,transform=ax.transAxes,horizontalalignment='right')
allax.append(ax)
multiplotaxes(i)
i+=1
if plotcolorbar:
if usedr:
cblabel = '$\Delta r/R_{200}$'
cblabel='$log_{10}(\sqrt{(\Delta r/R_{200})^2 + (\Delta v/\sigma)^2})$'
else:
cblabel='$log_{10}(M_*/M\odot) $'
plt.colorbar(ax=allax,fraction=0.08,label=cblabel)
plt.text(-.5,-.3,'$R_d \ (kpc)$',fontsize=22,horizontalalignment='center',transform=ax.transAxes)
plt.text(-2.4,1.5,'$R_{24} \ (kpc) $',fontsize=22,verticalalignment='center',rotation=90,transform=ax.transAxes,family='serif')
savefig(figuredir+'fig10.eps')
def plotsizevscluster(masscut=None,btcut=None):
clusters = ['Hercules','A1367','A2052','A2063']
bigmomma = ['Coma']
zflag = np.ones(len(g.sampleflag),'bool')
if masscut != None:
        zflag = zflag & (g.logstellarmass < masscut)
if btcut != None:
zflag = zflag & (g.gim2d.B_T_r < btcut)
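    # btcut is fixed to 0.3 below and applied only to the exterior samples;
    # the cut on cluster members was already folded into zflag above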
btcut = .3
flag = zflag & g.sampleflag & g.membflag
groupflag = flag & ((g.s.CLUSTER == 'MKW11') | (g.s.CLUSTER == 'MKW8') | (g.s.CLUSTER == 'AWM4') | (g.s.CLUSTER == 'NGC6107'))
clusterflag = flag & ((g.s.CLUSTER == 'Hercules') | (g.s.CLUSTER == 'A1367') | (g.s.CLUSTER == 'A2052') | (g.s.CLUSTER == 'A2063'))
bigmommaflag = flag & (g.s.CLUSTER == 'Coma')
exteriorflag = zflag & g.sampleflag & (g.gim2d.B_T_r < btcut) & ~g.membflag & ~g.dvflag
nearexteriorflag = zflag & g.sampleflag & (g.gim2d.B_T_r < btcut) & ~g.membflag & g.dvflag
envs = [exteriorflag, nearexteriorflag,groupflag, clusterflag, bigmommaflag]
plt.figure()
ypoint = []
y2 = []
y2err=[]
yerr = []
for i in range(len(envs)):
ypoint.append(np.median(g.sizeratio[envs[i]]))
#ypoint.append(ws.weighted_mean(s.sizeratio[envs[i]],weights=1./s.sizeratioERR[envs[i]]))
yerr.append(np.std(g.sizeratio[envs[i]])/np.sqrt(1.*np.sum(envs[i])))
y2.append(np.median(g.gim2d.B_T_r[envs[i]]))
#ypoint.append(ws.weighted_mean(s.sizeratio[envs[i]],weights=1./s.sizeratioERR[envs[i]]))
y2err.append(np.std(g.gim2d.B_T_r[envs[i]])/np.sqrt(1.*np.sum(envs[i])))
y=g.sizeratio[envs[i]]
plt.plot(i,np.median(y),'ko',markersize=10)
bp = boxplot([y],positions=[i],widths=[.3],whis=99)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='black', marker='+')
plt.setp(bp['medians'], color='black')
ax = plt.gca()
plt.text(.95,.94,'$Far-External: \ \Delta v/\sigma > 3 $',fontsize=14,transform = ax.transAxes,horizontalalignment='right')
plt.text(.95,.86,'$Near-External: \ \Delta v/\sigma < 3$',fontsize=14,transform = ax.transAxes,horizontalalignment='right')
plt.text(.95,.78,'$Group: \ \sigma < 700 \ km/s$',fontsize=14,transform = ax.transAxes,horizontalalignment='right')
plt.text(.95,.70,'$Cluster: \ \sigma > 700 \ km/s$',fontsize=14,transform = ax.transAxes,horizontalalignment='right')
plt.xticks(np.arange(len(envs)),['$Field$', '$Near-Field$', '$Group$', '$Cluster$', '$Coma$'],fontsize=16)
plt.xlim(-.3,len(envs)-.7)
plt.ylim(-.1,2.95)
#plt.legend()
plt.ylabel('$R_{24}/R_d$')
plt.xlabel('$Environment$')
plt.subplots_adjust(bottom=.2,top=.9,left=.15,right=.92)
#plt.subplots_adjust(bottom=.15)
plt.savefig(figuredir+'fig15.eps')
def paperTable1Paper1(sbcutobs=20,masscut=0):
#clustersigma={'MKW11':361, 'MKW8':325., 'AWM4':500., 'A2063':660., 'A2052':562., 'NGC6107':500., 'Coma':1000., 'A1367':745., 'Hercules':689.}
#clusterz={'MKW11':.022849,'MKW8':.027,'AWM4':.031755,'A2063':.034937,'A2052':.035491,'NGC6107':.030658,'Coma':.023,'A1367':.028,'Hercules':.037,'MKW10':.02054}
#clusterbiweightcenter={'MKW11':6897,'MKW8':8045,'AWM4':9636,'A2063':10426,'A2052':10354,'NGC6107':9397,'Coma':7015,'A1367':6507,'Hercules':10941}
#clusterbiweightcenter_errp={'MKW11':45,'MKW8':36,'AWM4':51,'A2063':63,'A2052':64,'NGC6107':48,'Coma':41,'A1367':48,'Hercules':48}
#clusterbiweightcenter_errm={'MK
#outfile=open(homedir+'/Dropbox/Research/MyPapers/LCSpaper1/Table1.tex','w')
outfile=open(figuredir+'Table1.tex','w')
outfile.write('\\begin{deluxetable*}{ccccc} \n')
outfile.write('\\tablecaption{Cluster Properties and Galaxy Sample Sizes \label{finalsample}} \n')
#outfile.write('\\tablehead{\colhead{Cluster} &\colhead{Biweight Central Velocity} & \colhead{Lit.} & \colhead{Biweight Scale} & \colhead{Lit} & \colhead{N$_{spiral}$} & \colhead{N$_{spiral}$} } \n')# % \\\\ & \colhead{(km/s)} & \colhead{(km/s)} & \colhead{(km/s)} & \colhead{(km/s)} & \colhead{Member} & \colhead{External}} \n')
outfile.write('\\tablehead{\colhead{Cluster} &\colhead{Biweight Central Velocity} & \colhead{Biweight Scale} & \colhead{N$_{gal}$} & \colhead{N$_{gal}$} \\\\ & \colhead{(km/s)} & \colhead{(km/s)} & Core & External } \n')
outfile.write('\startdata \n')
for cl in clusternamesbydistance:
nmemb_spiral = sum((g.s.CLUSTER == cl) & g.sampleflag & g.membflag)
nnearexterior_spiral = sum((g.s.CLUSTER == cl) & g.sampleflag & ~g.membflag & g.dvflag)
nexterior_spiral = sum((g.s.CLUSTER == cl) & g.sampleflag & ~g.membflag & ~g.dvflag)
exterior_spiral = sum((g.s.CLUSTER == cl) & g.sampleflag & ~g.membflag)
#tableline='%s & %i$^{%+i}_{-%i}$ & %i & %i$^{+%i}_{-%i}$ & %i & %i & %i & %i \\\\ \n' %(cl, clusterbiweightcenter[cl],clusterbiweightcenter_errp[cl],clusterbiweightcenter_errm[cl],int(round(clusterz[cl]*3.e5)), clusterbiweightscale[cl],clusterbiweightscale_errp[cl],clusterbiweightscale_errm[cl],int(round(clustersigma[cl])),nmemb_spiral,nexterior_spiral)
tableline='%s & %i$^{%+i}_{-%i}$ & %i$^{+%i}_{-%i}$ & %i & %i \\\\ \n' %(cl, clusterbiweightcenter[cl],clusterbiweightcenter_errp[cl],clusterbiweightcenter_errm[cl], clusterbiweightscale[cl],clusterbiweightscale_errp[cl],clusterbiweightscale_errm[cl],nmemb_spiral,exterior_spiral)
outfile.write(tableline)
outfile.write('\enddata \n')
outfile.write('\end{deluxetable*} \n')
outfile.close()
if __name__ == '__main__':
homedir = os.environ['HOME']
g = galaxies(homedir+'/github/LCS/')
#plotsigmaLx() # Fig 1
#plotpositionson24() # Fig 3
#g.plotsizedvdr(plothexbin=True,plotmembcut=False,plotoman=True,plotbadfits=0,hexbinmax=40,colormin=.2,colormax=1.1) # Fig 4
#g.compare_cluster_exterior() # Fig 5
#plotRe24vsReall(logyflag=False) # Fig 10
#g.plotsizehist(colorflag=True) # Fig 11a
#g.plotsizehist(btcut=.3,colorflag=True) # Fig 11b
#g.plotsize3panel(use_median=False,equal_pop_bins=True) # Fig 12
#g.plotsizestellarmass(use_median=False,equal_pop_bins=True,btmax=0.3) # Fig 13
#plotsizevsMclallwhisker(btcut=.3) # Fig 14
#plotsizevscluster(btcut=.3) # Fig 15
#g.plotsizeHIfrac() # Fig 16a
#g.plotsizeHIdef() # Fig 16b
#g.plotNUVrsize() # Fig 17
|
rfinn/LCS
|
python/LCSpaper1.py
|
Python
|
gpl-3.0
| 45,395
|
[
"Galaxy"
] |
857caa27ee8e4bf1c9d2132dc232379cf4265b74b0d0d6728f6c224ac09ca823
|
# -*- encoding:utf-8 -*-
from __future__ import division, absolute_import, print_function
import sys, textwrap
from ..docscrape import NumpyDocString, FunctionDoc, ClassDoc
from ..docscrape_sphinx import SphinxDocString, SphinxClassDoc
if sys.version_info[0] >= 3:
sixu = lambda s: s
else:
sixu = lambda s: unicode(s, 'unicode_escape')
doc_txt = '''\
numpy.multivariate_normal(mean, cov, shape=None, spam=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N, N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
Other Parameters
----------------
spam : parrot
A parrot off its mortal coil.
Raises
------
RuntimeError
Some error
Warns
-----
RuntimeWarning
Some warning
Warnings
--------
Certain warnings apply.
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
See Also
--------
some, other, funcs
otherfunc : relationship
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss
'''
doc = NumpyDocString(doc_txt)
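# Illustrative note: the parsed docstring behaves like a mapping, e.g.
# doc['Summary'] is a list of summary lines and doc['Parameters'] is a
# list of (name, type, description-lines) tuples, as the tests below
# exercise.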
def test_signature():
assert doc['Signature'].startswith('numpy.multivariate_normal(')
assert doc['Signature'].endswith('spam=None)')
def test_summary():
assert doc['Summary'][0].startswith('Draw values')
assert doc['Summary'][-1].endswith('covariance.')
def test_extended_summary():
assert doc['Extended Summary'][0].startswith('The multivariate normal')
def test_parameters():
assert len(doc['Parameters']) == 3
assert [n for n,_,_ in doc['Parameters']] == ['mean','cov','shape']
arg, arg_type, desc = doc['Parameters'][1]
assert arg_type == '(N, N) ndarray'
assert desc[0].startswith('Covariance matrix')
assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3'
def test_other_parameters():
assert len(doc['Other Parameters']) == 1
assert [n for n,_,_ in doc['Other Parameters']] == ['spam']
arg, arg_type, desc = doc['Other Parameters'][0]
assert arg_type == 'parrot'
assert desc[0].startswith('A parrot off its mortal coil')
def test_returns():
assert len(doc['Returns']) == 2
arg, arg_type, desc = doc['Returns'][0]
assert arg == 'out'
assert arg_type == 'ndarray'
assert desc[0].startswith('The drawn samples')
assert desc[-1].endswith('distribution.')
arg, arg_type, desc = doc['Returns'][1]
assert arg == 'list of str'
assert arg_type == ''
assert desc[0].startswith('This is not a real')
assert desc[-1].endswith('anonymous return values.')
def test_notes():
assert doc['Notes'][0].startswith('Instead')
assert doc['Notes'][-1].endswith('definite.')
assert len(doc['Notes']) == 17
def test_references():
assert doc['References'][0].startswith('..')
assert doc['References'][-1].endswith('2001.')
def test_examples():
assert doc['Examples'][0].startswith('>>>')
assert doc['Examples'][-1].endswith('True]')
def test_index():
assert doc['index']['default'] == 'random'
assert len(doc['index']) == 2
assert len(doc['index']['refguide']) == 2
def non_blank_line_by_line_compare(a,b):
a = textwrap.dedent(a)
b = textwrap.dedent(b)
a = [l.rstrip() for l in a.split('\n') if l.strip()]
b = [l.rstrip() for l in b.split('\n') if l.strip()]
for n,line in enumerate(a):
if line != b[n]:
raise AssertionError("Lines %s of a and b differ: "
"\n>>> %s\n<<< %s\n" %
(n,line,b[n]))
def test_str():
non_blank_line_by_line_compare(str(doc),
"""numpy.multivariate_normal(mean, cov, shape=None, spam=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N, N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
Other Parameters
----------------
spam : parrot
A parrot off its mortal coil.
Raises
------
RuntimeError
Some error
Warns
-----
RuntimeWarning
Some warning
Warnings
--------
Certain warnings apply.
See Also
--------
`some`_, `other`_, `funcs`_
`otherfunc`_
relationship
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss""")
def test_sphinx_str():
sphinx_doc = SphinxDocString(doc_txt)
non_blank_line_by_line_compare(str(sphinx_doc),
"""
.. index:: random
single: random;distributions, random;gauss
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
:Parameters:
**mean** : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
**cov** : (N, N) ndarray
Covariance matrix of the distribution.
**shape** : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
:Returns:
**out** : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
:Other Parameters:
**spam** : parrot
A parrot off its mortal coil.
:Raises:
**RuntimeError**
Some error
:Warns:
**RuntimeWarning**
Some warning
.. warning::
Certain warnings apply.
.. seealso::
:obj:`some`, :obj:`other`, :obj:`funcs`
:obj:`otherfunc`
relationship
.. rubric:: Notes
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
.. rubric:: References
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
.. only:: latex
[1]_, [2]_
.. rubric:: Examples
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
""")
doc2 = NumpyDocString("""
Returns array of indices of the maximum values of along the given axis.
Parameters
----------
a : {array_like}
Array to look in.
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis""")
def test_parameters_without_extended_description():
assert len(doc2['Parameters']) == 2
doc3 = NumpyDocString("""
my_signature(*params, **kwds)
Return this and that.
""")
def test_escape_stars():
signature = str(doc3).split('\n')[0]
assert signature == 'my_signature(\*params, \*\*kwds)'
doc4 = NumpyDocString(
"""a.conj()
Return an array with all complex-valued elements conjugated.""")
def test_empty_extended_summary():
assert doc4['Extended Summary'] == []
doc5 = NumpyDocString(
"""
a.something()
Raises
------
LinAlgException
If array is singular.
Warns
-----
SomeWarning
If needed
""")
def test_raises():
assert len(doc5['Raises']) == 1
name,_,desc = doc5['Raises'][0]
assert name == 'LinAlgException'
assert desc == ['If array is singular.']
def test_warns():
assert len(doc5['Warns']) == 1
name,_,desc = doc5['Warns'][0]
assert name == 'SomeWarning'
assert desc == ['If needed']
def test_see_also():
doc6 = NumpyDocString(
"""
z(x,theta)
See Also
--------
func_a, func_b, func_c
func_d : some equivalent func
foo.func_e : some other func over
multiple lines
func_f, func_g, :meth:`func_h`, func_j,
func_k
:obj:`baz.obj_q`
:class:`class_j`: fubar
foobar
""")
assert len(doc6['See Also']) == 12
for func, desc, role in doc6['See Also']:
if func in ('func_a', 'func_b', 'func_c', 'func_f',
'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'):
assert(not desc)
else:
assert(desc)
if func == 'func_h':
assert role == 'meth'
elif func == 'baz.obj_q':
assert role == 'obj'
elif func == 'class_j':
assert role == 'class'
else:
assert role is None
if func == 'func_d':
assert desc == ['some equivalent func']
elif func == 'foo.func_e':
assert desc == ['some other func over', 'multiple lines']
elif func == 'class_j':
assert desc == ['fubar', 'foobar']
def test_see_also_print():
class Dummy(object):
"""
See Also
--------
func_a, func_b
func_c : some relationship
goes here
func_d
"""
pass
obj = Dummy()
s = str(FunctionDoc(obj, role='func'))
assert(':func:`func_a`, :func:`func_b`' in s)
assert(' some relationship' in s)
assert(':func:`func_d`' in s)
doc7 = NumpyDocString("""
Doc starts on second line.
""")
def test_empty_first_line():
assert doc7['Summary'][0].startswith('Doc starts')
def test_no_summary():
str(SphinxDocString("""
Parameters
----------"""))
def test_unicode():
doc = SphinxDocString("""
öäöäöäöäöåååå
öäöäöäööäååå
Parameters
----------
ååå : äää
ööö
Returns
-------
ååå : ööö
äää
""")
assert isinstance(doc['Summary'][0], str)
assert doc['Summary'][0] == 'öäöäöäöäöåååå'
def test_plot_examples():
cfg = dict(use_plots=True)
doc = SphinxDocString("""
Examples
--------
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3],[4,5,6])
>>> plt.show()
""", config=cfg)
assert 'plot::' in str(doc), str(doc)
doc = SphinxDocString("""
Examples
--------
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3],[4,5,6])
plt.show()
""", config=cfg)
assert str(doc).count('plot::') == 1, str(doc)
def test_class_members():
class Dummy(object):
"""
Dummy class.
"""
def spam(self, a, b):
"""Spam\n\nSpam spam."""
pass
def ham(self, c, d):
"""Cheese\n\nNo cheese."""
pass
@property
def spammity(self):
"""Spammity index"""
return 0.95
class Ignorable(object):
"""local class, to be ignored"""
pass
for cls in (ClassDoc, SphinxClassDoc):
doc = cls(Dummy, config=dict(show_class_members=False))
assert 'Methods' not in str(doc), (cls, str(doc))
assert 'spam' not in str(doc), (cls, str(doc))
assert 'ham' not in str(doc), (cls, str(doc))
assert 'spammity' not in str(doc), (cls, str(doc))
assert 'Spammity index' not in str(doc), (cls, str(doc))
doc = cls(Dummy, config=dict(show_class_members=True))
assert 'Methods' in str(doc), (cls, str(doc))
assert 'spam' in str(doc), (cls, str(doc))
assert 'ham' in str(doc), (cls, str(doc))
assert 'spammity' in str(doc), (cls, str(doc))
if cls is SphinxClassDoc:
assert '.. autosummary::' in str(doc), str(doc)
else:
assert 'Spammity index' in str(doc), str(doc)
def test_duplicate_signature():
# Duplicate function signatures occur e.g. in ufuncs, when the
# automatic mechanism adds one, and a more detailed comes from the
# docstring itself.
doc = NumpyDocString(
"""
z(x1, x2)
z(a, theta)
""")
assert doc['Signature'].strip() == 'z(a, theta)'
class_doc_txt = """
Foo
Parameters
----------
f : callable ``f(t, y, *f_args)``
Aaa.
jac : callable ``jac(t, y, *jac_args)``
Bbb.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
Methods
-------
a
b
c
Examples
--------
For usage examples, see `ode`.
"""
def test_class_members_doc():
doc = ClassDoc(None, class_doc_txt)
non_blank_line_by_line_compare(str(doc),
"""
Foo
Parameters
----------
f : callable ``f(t, y, *f_args)``
Aaa.
jac : callable ``jac(t, y, *jac_args)``
Bbb.
Examples
--------
For usage examples, see `ode`.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
Methods
-------
a
b
c
.. index::
""")
def test_class_members_doc_sphinx():
doc = SphinxClassDoc(None, class_doc_txt)
non_blank_line_by_line_compare(str(doc),
"""
Foo
:Parameters:
**f** : callable ``f(t, y, *f_args)``
Aaa.
**jac** : callable ``jac(t, y, *jac_args)``
Bbb.
.. rubric:: Examples
For usage examples, see `ode`.
.. rubric:: Attributes
=== ==========
t (float) Current time.
y (ndarray) Current variable values.
=== ==========
.. rubric:: Methods
=== ==========
a
b
c
=== ==========
""")
|
eteq/astropy-helpers
|
astropy_helpers/sphinx/ext/tests/test_docscrape.py
|
Python
|
bsd-3-clause
| 18,105
|
[
"Gaussian"
] |
be220211e1fdbd056a1f9f57c204159ebd55523d0c6ec36d32f0c760bd3bcd77
|
# -*- coding: utf-8 -*-
"""Page and page Component classes
The keteparaha Page and Component classes represent web pages and components
of web pages.
Pages are identified by the URL of the browser, and components by the CSS
selector that is used to retrieve them.
If you perform an action that causes the browser to visit a new URL, and
you have defined a Page class with that URL, then the new page will
automatically be returned from that action.
Creating an instance of a page object will automatically cause the browser to
visit that page as well.
Example:
BASE_URL = 'http://my-site.com'
class Home(Page):
url = BASE_URL + '/'
class Dashboard(Page):
url = BASE_URL + '/dashboard/'
home = Home(driver) # driver is a WebDriver instance, browser would
# automatically visit the home page at this point
dashboard = home.click_link('Dashboard')
"""
from __future__ import unicode_literals
import collections
from inspect import isclass
import time
from selenium.common import exceptions
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.wait import TimeoutException, WebDriverWait
from six import with_metaclass
try:
from urlparse import parse_qs, urlparse
except ImportError: # Python 3
from urllib.parse import parse_qs, urlparse
import re
from .expectations import (
_wait_for_condition,
component_to_be_clickable,
text_to_be_present_in_component
)
from . import flow
ELEMENT_TIMEOUT = 10
""" (int): The seconds that a component will wait to be visible, clickable, or
present before raising a TimeoutException
"""
__all__ = ['Component', 'Page']
# Workaround for backwards compatibility with Python 2.7
try:
unicode = unicode
except NameError: # Python 3: provide the names this module still uses
unicode = str
basestring = (str, bytes)
def match_url(url, candidates):
parsed = urlparse(url)
for candidate in candidates:
if not isinstance(candidate, basestring):
continue
if re.match(r'https?://', candidate):
candidate_path = urlparse(candidate).path
else:
candidate_path = candidate
match = re.match(candidate_path, parsed.path)
if match:
kwargs = match.groupdict()
args = tuple(set(match.groups()) - set(kwargs.values()))
if parsed.query:
kwargs['_query'] = parse_qs(parsed.query)
if parsed.fragment:
kwargs['_fragment'] = parsed.fragment
return candidate, args, kwargs
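# Illustrative sketch (hypothetical pattern and URL) of match_url's return
# value: the matched candidate, positional groups, and named groups plus
# any query/fragment extras:
# match_url('http://my-site.com/items/42?sort=asc',
#           [r'/items/(?P<item_id>\d+)'])
# -> (r'/items/(?P<item_id>\d+)', (),
#     {'item_id': '42', '_query': {'sort': ['asc']}})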
class _Registry(collections.MutableMapping):
"""A singleton registry for pages and components"""
store = dict()
def __delitem__(self, key):
pass
def __getitem__(self, key):
return self.store[key]
def __iter__(self):
for value in self.store.values():
yield value
def __setitem__(self, key, value):
self.store[key] = value
def __len__(self):
return len(self.store)
def __call__(self, selector):
try:
return self.store[selector]
except KeyError:
return self.make_class(selector)
def make_class(self, selector):
try:
return type(
'DynamicComponent', (Component,), {'selector': selector})
except TypeError: # Python < 3
return type(
b'DynamicComponent', (Component,), {'selector': selector})
def keys(self):
return self.store.keys()
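# Illustrative sketch (hypothetical selector): looking up a selector with
# no registered class falls through to make_class(), which builds a
# throwaway Component subclass bound to that selector:
# registry = _Registry()
# cls = registry('#sidebar')    # nothing registered for '#sidebar'
# cls.__name__, cls.selector    # -> ('DynamicComponent', '#sidebar')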
class _RegistryMeta(type):
"""Add our pages and components to a central registry"""
def __init__(cls, name, bases, dct):
if dct.get('url'):
cls._registry[dct.get('url')] = cls
elif dct.get('selector'):
cls._registry[dct.get('selector')] = cls
return super(_RegistryMeta, cls).__init__(name, bases, dct)
class _SeleniumWrapper(object):
"""Mixin for page and component class that understands the WebDriver API
"""
TimeoutException = TimeoutException
class ComponentMissing(Exception):
pass
def _get_component_class(self, component_or_selector):
"""Ensure we have a component class
Either return argument if it's a component, get a registered component,
or dynamically create a component.
"""
if isclass(component_or_selector) and issubclass(
component_or_selector, Component):
return component_or_selector
return self._registry(component_or_selector)
def get_component(self, component_or_selector):
"""Return an initialised component present in page
takes either a component class to find in the page or a css selector.
If the selector is not present in the page raises a ComponentMissing
error.
"""
ComponentClass = self._get_component_class(component_or_selector)
try:
return ComponentClass(self)
except TimeoutException:
raise self.ComponentMissing(
'"{0}" could not be found in page'.format(
ComponentClass.selector))
def get_components(self, component_or_selector):
"""Return an list of initialised components present in page
Returns an empty list if no components could be found
"""
ComponentClass = self._get_component_class(component_or_selector)
components = []
try:
elements = self.get_elements(ComponentClass.selector)
except TimeoutException:
return components
for idx, element in enumerate(elements):
comp_inst = ComponentClass(self, find_by='index_position')
comp_inst._index_position = idx
components.append(comp_inst)
return components
def get_element(self, selector, driver=None):
"""Get the DOM element identified by the css selector"""
return _wait_for_condition(
ec.presence_of_element_located((By.CSS_SELECTOR, selector)),
self,
message='No element found with selector "{0}".'.format(selector),
driver=driver
)
def get_clickable_element(self, selector, driver=None):
"""Return an element that can be clicked, or raise an error"""
return _wait_for_condition(
ec.element_to_be_clickable((By.CSS_SELECTOR, selector)),
self,
message='No clickable element found with selector "{0}".'.format(
selector),
driver=driver
)
def get_visible_element(self, selector):
"""Return an element that is visible, or raise an error"""
return _wait_for_condition(
ec.visibility_of_element_located((By.CSS_SELECTOR, selector)),
self,
message='No visible element found with selector "{0}".'.format(
selector)
)
def get_element_by_link_text(self, link_text):
"""Get the DOM element identified by the css selector"""
return _wait_for_condition(
ec.presence_of_element_located((By.LINK_TEXT, link_text)),
self,
message='No link with text "{0}".'.format(link_text)
)
def get_elements(self, selector):
"""Get a list of elements identified by the css selector"""
return _wait_for_condition(
ec.presence_of_all_elements_located((By.CSS_SELECTOR, selector)),
self
)
def get_attribute(self, attribute):
"""Return the value of an attribute of the component"""
return self._element.get_attribute(attribute)
def wait_for_invisibility(self, selector):
"""Pause until the element identified by selector is invisible"""
return _wait_for_condition(
ec.invisibility_of_element_located((By.CSS_SELECTOR, selector)),
self
)
def text_in_element(self, selector, text):
"""Return whether the text is in the element identified by selector"""
return _wait_for_condition(
ec.text_to_be_present_in_element(
(By.CSS_SELECTOR, selector), text),
self,
message='"{0}" not found in "{1}".'.format(
text, self.get_component(selector).text)
)
def has_text(self, text):
"""Return whether the text is in the component"""
return _wait_for_condition(
text_to_be_present_in_component(self, text),
self,
message='"{0}" not found in "{1}".'.format(
text, self._element.text)
)
def _click(self, component, opens=None):
"""Click an element and return an appropriate component or page
component -- a keteparaha.page.Component
opens -- a keteparaha.page.Component to initialise and return
returns -- either a new Page object if the url changes, the initialised
Component passed in as opens, or itself
"""
_wait_for_condition(
component_to_be_clickable(component), component,
message='"{0}" was never clickable'.format(self)
)
component._element.click()
if opens and isinstance(opens, basestring):
# opens is a string, look it up in the registry
return self._registry(opens)(self)
if opens and isclass(opens) and issubclass(opens, Component):
# opens is a Component class, use it
return opens(self)
if opens and isinstance(opens, Component):
# opens is an initialised component, use it
return opens
if self.url != self.location() and self.location() in self._registry:
# We have a page with a simple url
return self._registry(self.location())(driver=self._driver)
if (not match_url(self.url, (self.location(),))
and match_url(self.location(), self._registry.keys())):
# We have a page with a complex url that's in the registry
match, args, kwargs = match_url(
self.location(),
self._registry.keys()
)
page = self._registry[match](driver=self._driver)
page.setup(*args, **kwargs)
return page
return self
def click(self, selector=None, opens=None):
"""Main method for interacting with a page or component
Returns either self, a new page object based on browser url, or a
page component based on the selector passed in as 'opens'.
selector can be a css selector in the form of a string, or a
selenium WebElement.
"""
if isinstance(selector, basestring):
# selector passed in, get component class from registry
component = self._registry(selector)(self)
return self._click(component, opens)
elif isclass(selector) and issubclass(selector, Component):
# We already have a component class, so just use it
component = selector(self)
return self._click(component, opens)
elif isinstance(selector, Component):
# We already have an initialised component, so just use it
component = selector
return self._click(component, opens)
elif selector is None:
# We have no selector so click on yourself
return self._click(self, opens)
raise ValueError(
'selector, "{0}", not a string or Component instance.'.format(
selector))
def click_link(self, link_text, opens=None):
component = Component(self, find_by='link_text')
component.selector = link_text
return self._click(component, opens)
def click_button(self, button_text, opens=None):
"""Find buttons on the page and click the first one with the text"""
component = Component(self, find_by='button_text')
component.selector = button_text
return self._click(component, opens)
def location(self):
"""The current page location without any query parameters"""
return self.page._driver.current_url
def select_option(self, selector, option_text):
"""Select option in dropdown identified by selector with given text"""
def find_and_select(selector, option_text):
return Select(
self.get_element(selector)
).select_by_visible_text(option_text)
retryable_find_and_select = flow.retry(
find_and_select, exceptions.NoSuchElementException
)
return retryable_find_and_select(selector, option_text)
def scroll_into_view(self):
"""Scroll the window until the component is visible"""
self._element.location_once_scrolled_into_view
def clear(self, selector):
"""Clear text out of input identified by CSS selector"""
try:
self.get_visible_element(selector).clear()
except (exceptions.InvalidElementStateException,
exceptions.WebDriverException):
raise exceptions.WebDriverException(
'You cannot clear that element')
def hover(self, selector, opens=None):
"""Hover over element identified by CSS selector"""
ActionChains(self._driver).move_to_element(
self.get_element(selector)).perform()
if opens:
return self._get_component_class(opens)(self)
def enter_text(self, selector, text):
"""Enter text into DOM element identified by selector
The function performs some error checking because as of Jan 2014
send_keys on the element is unreliable at text entry.
"""
element = self.get_visible_element(selector)
for _ in range(5):
element.send_keys(*text)
try:
value_in_place = element.get_attribute("value") or element.text
except exceptions.StaleElementReferenceException:
return
expected = "".join([unicode(v) for v in text])
if value_in_place == expected:
return
try:
element.clear()
except (exceptions.InvalidElementStateException,
exceptions.WebDriverException):
return # Element is not user editable and can't be cleared
time.sleep(0.2)
raise AssertionError("Unable to correctly type {0}".format(text))
class _WebElementProxy(object):
"""A proxy to the Selenium WebElement identified by obj's selector"""
def __init__(self):
self.selector = 'html'
def __get__(self, obj, owner):
selector = obj.selector if hasattr(obj, 'selector') else self.selector
if obj._find_by == 'selector':
try:
return obj._driver.find_element_by_css_selector(selector)
except exceptions.NoSuchElementException:
return WebDriverWait(obj._driver, ELEMENT_TIMEOUT).until(
ec.presence_of_element_located(
(
By.CSS_SELECTOR,
selector
)
),
'No element "{0}", waited {1} seconds'.format(
selector, ELEMENT_TIMEOUT
)
)
elif obj._find_by == 'button_text':
for button in obj._driver.find_elements_by_tag_name("button"):
if button.text == obj.selector and button.is_displayed():
return button
raise AssertionError(
"Could not find a button with the text '%s'" % (selector,)
)
elif obj._find_by == 'link_text':
try:
return obj._driver.find_element_by_link_text(selector)
except exceptions.NoSuchElementException:
return WebDriverWait(obj._driver, ELEMENT_TIMEOUT).until(
ec.presence_of_element_located(
(
By.LINK_TEXT,
selector
)
),
'No link with text "{0}", waited {1} seconds'.format(
selector, ELEMENT_TIMEOUT
)
)
elif obj._find_by == 'index_position':
idx = obj._index_position
return obj._driver.find_elements_by_css_selector(selector)[idx]
else:
raise ValueError('Element proxy needs to know how to find element')
def __set__(self, obj, value):
raise AttributeError()
class WebDriverOnly(object):
"""This attribute must be a WebDriver instance, stored per page"""
def __set__(self, obj, value):
if not isinstance(value, WebDriver):
raise TypeError('driver must be an instance of WebDriver')
# store on the owning object so pages don't share one driver slot
obj.__dict__['_webdriver'] = value
def __get__(self, obj, owner):
if obj is None:
return self
return obj.__dict__['_webdriver']
class _BaseComponent(object):
_element = _WebElementProxy()
@property
def text(self):
"""The visible text of the component"""
return self._element.text
class Component(
with_metaclass(_RegistryMeta, _BaseComponent, _SeleniumWrapper)):
"""Generic page component, intended to be subclassed
Pages and Components are stored in a registry and switched to dynamically
class ShoppingBasket(Component):
selector = '#shopping-basket'
def remove_item(self, name):
contents = self.get_components('tr')
for item in contents:
if name in item.text:
item.click('.remove')
return
raise AssertionError('No item in basket called "{0}"'.format(name))
page = Page(driver)
basket = page.click_link('Shopping Basket', opens=ShoppingBasket)
# The following would also work identically:
## basket = page.click_link('Shopping Basket', opens='#shopping-basket')
basket.remove_item('Buzz Lightyear')
"""
_registry = _Registry()
selector = None
def __repr__(self):
output = '{0}(selector="{1}")'.format(
self.__class__.__name__, self.selector)
if self._find_by == 'index_position':
output = output + '[{0}]'.format(self._index_position)
return output
def __init__(self, parent, driver=None, find_by='selector'):
self._parent = parent
self._find_by = find_by
@property
def _driver(self):
return self._parent._element
@property
def page(self):
if isinstance(self._parent, Page):
return self._parent
return self._parent.page
@property
def url(self):
"""The url of the page which the component is inside"""
return self.page.url
class Page(
with_metaclass(_RegistryMeta, _BaseComponent, _SeleniumWrapper)):
"""Generic web page, intended to be subclassed
Pages and Components are stored in a registry and switched to dynamically
class LoginPage(Page):
url = 'https://your-site.com/login'
def login(self, username, password):
self.enter_text("input[name=username]", username)
self.enter_text("input[name=password]", password)
return self.click("input[type=submit]")
"""
_driver = WebDriverOnly()
_registry = _Registry()
def __init__(self, driver=None):
self._find_by = 'selector'
self.selector = 'html'
try:
self._driver = driver
except TypeError: # Driver was a WebElement, not WebDriver
self._driver = driver.parent
if self.location() != self.url:
self._driver.get(self.url)
def setup(self, *args, **kwargs):
raise NotImplementedError(
'Pages that implement a complex url need to implement a setup '
'method. This page is being passed args: {} and kwargs: '
'{}'.format(args, kwargs)
)
@property
def page(self):
"""Unifies the api for pages and components slightly"""
return self
|
aychedee/keteparaha
|
keteparaha/page.py
|
Python
|
mit
| 20,391
|
[
"VisIt"
] |
737afae2ac522035ab62f4a30fc1baa3ee3e3dd3329855c69c620f42926a3685
|
"""
Beta diversity measures (:mod:`skbio.diversity.beta`)
=====================================================
.. currentmodule:: skbio.diversity.beta
This package contains helper functions for working with scipy's pairwise
distance (``pdist``) functions in scikit-bio, and will eventually be expanded
to contain pairwise distance/dissimilarity methods that are not implemented
(or planned to be implemented) in scipy.
The functions in this package currently support applying ``pdist`` functions
to all pairs of samples in a sample by observation count or abundance matrix
and returning an ``skbio.DistanceMatrix`` object. This application is
illustrated below for a few different forms of input.
Functions
---------
.. autosummary::
:toctree: generated/
pw_distances
pw_distances_from_table
Examples
--------
Create a table containing 7 OTUs and 6 samples:
.. plot::
:context:
>>> from skbio.diversity.beta import pw_distances
>>> import numpy as np
>>> data = [[23, 64, 14, 0, 0, 3, 1],
... [0, 3, 35, 42, 0, 12, 1],
... [0, 5, 5, 0, 40, 40, 0],
... [44, 35, 9, 0, 1, 0, 0],
... [0, 2, 8, 0, 35, 45, 1],
... [0, 0, 25, 35, 0, 19, 0]]
>>> ids = list('ABCDEF')
Compute Bray-Curtis distances between all pairs of samples and return a
``DistanceMatrix`` object:
>>> bc_dm = pw_distances(data, ids, "braycurtis")
>>> print(bc_dm)
6x6 distance matrix
IDs:
'A', 'B', 'C', 'D', 'E', 'F'
Data:
[[ 0. 0.78787879 0.86666667 0.30927835 0.85714286 0.81521739]
[ 0.78787879 0. 0.78142077 0.86813187 0.75 0.1627907 ]
[ 0.86666667 0.78142077 0. 0.87709497 0.09392265 0.71597633]
[ 0.30927835 0.86813187 0.87709497 0. 0.87777778 0.89285714]
[ 0.85714286 0.75 0.09392265 0.87777778 0. 0.68235294]
[ 0.81521739 0.1627907 0.71597633 0.89285714 0.68235294 0. ]]
Compute Jaccard distances between all pairs of samples and return a
``DistanceMatrix`` object:
>>> j_dm = pw_distances(data, ids, "jaccard")
>>> print(j_dm)
6x6 distance matrix
IDs:
'A', 'B', 'C', 'D', 'E', 'F'
Data:
[[ 0. 0.83333333 1. 1. 0.83333333 1. ]
[ 0.83333333 0. 1. 1. 0.83333333 1. ]
[ 1. 1. 0. 1. 1. 1. ]
[ 1. 1. 1. 0. 1. 1. ]
[ 0.83333333 0.83333333 1. 1. 0. 1. ]
[ 1. 1. 1. 1. 1. 0. ]]
Determine if the resulting distance matrices are significantly correlated
by computing the Mantel correlation between them. Then determine if the
p-value is significant based on an alpha of 0.05:
>>> from skbio.stats.distance import mantel
>>> r, p_value, n = mantel(j_dm, bc_dm)
>>> print(r)
-0.209362157621
>>> print(p_value < 0.05)
False
Compute PCoA for both distance matrices, and then find the Procrustes
M-squared value that results from comparing the coordinate matrices.
>>> from skbio.stats.ordination import PCoA
>>> bc_pc = PCoA(bc_dm).scores()
>>> j_pc = PCoA(j_dm).scores()
>>> from skbio.stats.spatial import procrustes
>>> print(procrustes(bc_pc.site, j_pc.site)[2])
0.466134984787
All of this only gets interesting in the context of sample metadata, so
let's define some:
>>> import pandas as pd
>>> try:
... # not necessary for normal use
... pd.set_option('show_dimensions', True)
... except KeyError:
... pass
>>> sample_md = {
... 'A': {'body_site': 'gut', 'subject': 's1'},
... 'B': {'body_site': 'skin', 'subject': 's1'},
... 'C': {'body_site': 'tongue', 'subject': 's1'},
... 'D': {'body_site': 'gut', 'subject': 's2'},
... 'E': {'body_site': 'tongue', 'subject': 's2'},
... 'F': {'body_site': 'skin', 'subject': 's2'}}
>>> sample_md = pd.DataFrame.from_dict(sample_md, orient='index')
>>> sample_md
subject body_site
A s1 gut
B s1 skin
C s1 tongue
D s2 gut
E s2 tongue
F s2 skin
<BLANKLINE>
[6 rows x 2 columns]
Now let's plot our PCoA results, coloring each sample by the subject it
was taken from:
>>> fig = bc_pc.plot(sample_md, 'subject',
... axis_labels=('PC 1', 'PC 2', 'PC 3'),
... title='Samples colored by subject', cmap='jet', s=50)
.. plot::
:context:
We don't see any clustering/grouping of samples. If we were to instead color
the samples by the body site they were taken from, we see that the samples
form three separate groups:
>>> import matplotlib.pyplot as plt
>>> plt.close('all') # not necessary for normal use
>>> fig = bc_pc.plot(sample_md, 'body_site',
... axis_labels=('PC 1', 'PC 2', 'PC 3'),
... title='Samples colored by body site', cmap='jet', s=50)
Ordination techniques, such as PCoA, are useful for exploratory analysis. The
next step is to quantify the strength of the grouping/clustering that we see in
ordination plots. There are many statistical methods available to accomplish
this; many operate on distance matrices. Let's use ANOSIM to quantify the
strength of the clustering we see in the ordination plots above, using our
Bray-Curtis distance matrix and sample metadata.
First test the grouping of samples by subject:
>>> from skbio.stats.distance import anosim
>>> results = anosim(bc_dm, sample_md, column='subject', permutations=999)
>>> results['test statistic']
-0.4074074074074075
>>> results['p-value'] < 0.1
False
The negative value of ANOSIM's R statistic indicates anti-clustering and the
p-value is insignificant at an alpha of 0.1.
Now let's test the grouping of samples by body site:
>>> results = anosim(bc_dm, sample_md, column='body_site', permutations=999)
>>> results['test statistic']
1.0
>>> results['p-value'] < 0.1
True
The R statistic of 1.0 indicates strong separation of samples based on body
site. The p-value is significant at an alpha of 0.1.
References
----------
.. [1] http://matplotlib.org/examples/mplot3d/scatter3d_demo.html
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from skbio.util import TestRunner
from ._base import pw_distances, pw_distances_from_table
__all__ = ["pw_distances", "pw_distances_from_table"]
test = TestRunner(__file__).test
|
jensreeder/scikit-bio
|
skbio/diversity/beta/__init__.py
|
Python
|
bsd-3-clause
| 6,898
|
[
"scikit-bio"
] |
652822557f82056d8c95f6c1f48b982b088ab799937a8ea238540708934a8530
|
# -*- coding: utf-8 -*-
# cldomain is a Common Lisp domain for the Sphinx documentation tool.
# Copyright (C) 2011-2014 Russell Sim <russell.sim@gmail.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
sphinxcontrib.cldomain
~~~~~~~~~~~~~~~~~~~~~~
The Common Lisp domain
"""
import re
import os
import sys
from os import path
import tempfile
import json
from collections import defaultdict
import operator
import subprocess
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from docutils import nodes
from docutils.statemachine import string2lines, StringList
import pprint
from sphinx import addnodes
from sphinx.util.console import red
from sphinx.locale import l_, _
from sphinx.roles import XRefRole
from sphinx.domains import Domain, ObjType
from sphinx.directives import ObjectDescription
from sphinx.util.nodes import make_refnode
from sphinx.util.compat import Directive
from sphinx.util.docfields import Field, GroupedField
__version__ = open(path.join(path.dirname(__file__),
"version.lisp-expr")).read().strip('"')
ALL_TYPES = ["macro", "function", "genericFunction",
"setf", "variable", "type"]
upper_symbols = re.compile(r"([^a-z\s\"`]*[A-Z]{2,}[^a-z\s\"`:]*)($|\s)")
DOC_STRINGS = defaultdict(dict, {})
TYPES = defaultdict(dict, {})
ARGS = defaultdict(dict, {})
METHODS = defaultdict(dict, {})
SLOTS = defaultdict(dict, {})
USED_SYMBOLS = defaultdict(dict, {})
lambda_list_keywords = ["&allow-other-keys", "&key",
"&rest", "&aux", "&optional"]
def node_to_dict(node):
name = getattr(node, 'tagname', node)
if getattr(node, 'rawsource', None):
return {name: node.rawsource}
nodes = {name: []}
for child in node.children:
nodes[name].append(node_to_dict(child))
return nodes
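# Illustrative sketch: node_to_dict flattens a docutils node tree into
# nested dicts keyed by tagname, so a paragraph wrapping a Text node maps
# to something like {'paragraph': [{'#text': 'hi'}]} (leaf nodes with a
# rawsource collapse to that string).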
def debug_print(node):
"""Useful in pdb sessions"""
node = node_to_dict(node)
pprint.pprint(node)
def record_use(package, symbol_name, objtype):
"""Record unused package symbols."""
symbol = symbol_name.upper()
USED_SYMBOLS[package].setdefault(symbol, []).append(objtype)
def bool_option(arg):
"""Used to convert flag options to directives. (Instead of
directives.flag(), which returns None).
"""
return True
# An almost exact copy of Peter Norvig's scheme parser
# http://norvig.com/lispy.html
def _read(s):
"Read a Scheme expression from a string."
return _read_from(_tokenize(s))
def _tokenize(s):
"Convert a string into a list of tokens."
return s.replace('(', ' ( ').replace(')', ' ) ').split()
def _read_from(tokens):
"Read an expression from a sequence of tokens."
if len(tokens) == 0:
raise SyntaxError('unexpected EOF while reading')
token = tokens.pop(0)
if '(' == token:
L = []
while tokens[0] != ')':
L.append(_read_from(tokens))
tokens.pop(0) # pop off ')'
return L
elif ')' == token:
raise SyntaxError('unexpected )')
else:
return token
# end of http://norvig.com/lispy.html
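# Illustrative sketch of the mini-parser above (input is hypothetical):
# _read("(defmethod foo ((x string) y))")
# -> ['defmethod', 'foo', [['x', 'string'], 'y']]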
def parse_specializer_symbol(symbol, package):
"""Parse symbols, for specializers"""
symbol = symbol.upper()
if symbol.startswith(":"):
return "KEYWORD" + symbol
# TODO (RS) this needs to be smarter what happens if there is an
# internal symbol instead of an external one?
if ":" not in symbol:
return package + ":" + symbol
return symbol
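# Illustrative examples (package name is hypothetical):
# parse_specializer_symbol(':before', 'MY-PKG')   -> 'KEYWORD:BEFORE'
# parse_specializer_symbol('widget', 'MY-PKG')    -> 'MY-PKG:WIDGET'
# parse_specializer_symbol('cl:string', 'MY-PKG') -> 'CL:STRING'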
class desc_clparameterlist(addnodes.desc_parameterlist):
"""Node for a common lisp parameter list."""
child_text_separator = ' '
# v is short for visit
# d is short for depart
def v_clparameterlist(self, node):
self.first_param = True
self.body.append(' ')
self.body.append('(')
self.param_separator = node.child_text_separator
def d_clparameterlist(self, node):
self.body.append(')')
def v_latex_clparameterlist(self, node):
self.body.append('}{')
self.first_param = True
self.param_separator = node.child_text_separator
def d_latex_clparameterlist(self, node):
self.body.append('}{')
class desc_clparameter(addnodes.desc_parameter):
"""Node for a common lisp parameter item."""
def d_clparameter(self, node):
pass
def v_html_clparameter(self, node):
if self.body[-1] != ('('):
self.body.append(self.param_separator)
if node.hasattr('lambda_keyword'):
self.body.append('<em class="lambda_keyword text-muted">')
elif node.hasattr('keyword'):
self.body.append('<em class="keyword text-muted">')
elif not node.hasattr('noemph'):
self.body.append('<em>')
def d_html_clparameter(self, node):
if node.hasattr('lambda_keyword'):
self.body.append('</em>')
elif node.hasattr('keyword'):
self.body.append('</em>')
elif not node.hasattr('noemph'):
self.body.append('</em>')
def v_latex_clparameter(self, node):
if not self.first_param:
self.body.append(self.param_separator)
else:
self.first_param = False
if not node.hasattr('noemph'):
self.body.append(r'\emph{')
def d_latex_clparameter(self, node):
if not node.hasattr('noemph'):
self.body.append('}')
def v_texinfo_clparameter(self, node):
if not self.first_param:
self.body.append(self.param_separator)
else:
self.first_param = False
text = self.escape(node.astext())
# replace no-break spaces with normal ones
text = text.replace(u' ', '@w{ }')
self.body.append(text)
raise nodes.SkipNode
def v_text_clparameter(self, node):
if not self.first_param:
self.add_text(self.param_separator)
else:
self.first_param = False
self.add_text(node.astext())
raise nodes.SkipNode
def v_bs_html_desc_type(self, node):
self.body.append(self.param_separator)
self.body.append(self.starttag(node, 'tt', '', CLASS='desc-type'))
def d_bs_html_desc_type(self, node):
self.body.append('</tt>')
def v_html_desc_type(self, node):
self.body.append(self.param_separator)
def specializer(symbol, sexp, state, package, node_type=nodes.inline):
result = StringIO()
result.write("(")
first = True
for atom in sexp:
if first:
first = False
else:
result.write(" ")
if atom.startswith("KEYWORD:"):
result.write("(EQL :%s)" % atom.split(":")[-1])
else:
result.write(atom)
result.write(" ")
result.write(")")
result.seek(0)
xref = ':cl:generic:`%s <%s:%s>`' % \
(result.read().lower(), package, symbol)
lines = string2lines(xref)
node = node_type()
state.nested_parse(StringList(lines), 0, node)
return node
def specializer_xref(symbol, sexp, state, package, node_type=nodes.inline):
result = StringIO()
first = True
for atom in sexp:
if first:
first = False
else:
result.write(" ")
if atom.startswith("KEYWORD:"):
result.write("(EQL :%s)" % atom.split(":")[-1])
elif package:
if atom.startswith(package + ":"):
result.write(atom.split(":")[-1])
else:
result.write(atom)
else:
result.write(atom)
target = " ".join([a.lower() for a in sexp])
node = node_type()
result.seek(0)
xref = ":cl:method:`(%s) <%s %s>`" % \
(result.read().lower(), symbol, target)
lines = string2lines(xref)
state.nested_parse(StringList(lines), 0, node)
return node
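# Illustrative sketch (hypothetical symbol and specializer list): for
# symbol 'my-pkg:draw', sexp ['MY-PKG:WIDGET', 'KEYWORD:BEFORE'] and
# package 'MY-PKG', the xref text built above is roughly
# :cl:method:`(widget (eql :before)) <my-pkg:draw my-pkg:widget keyword:before>`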
def qualify_sexp(package, sexp):
"""If the sexp contains atoms that don't have a package then qualify
them.
"""
sexp_ret = []
for atom in sexp:
if atom.startswith(":"):
sexp_ret.append("keyword" + atom)
elif ":" in atom:
sexp_ret.append(atom)
else:
sexp_ret.append(package + ":" + atom)
return sexp_ret
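# Illustrative example (package name is hypothetical; atom case is kept
# exactly as passed in):
# qualify_sexp('my-pkg', [':before', 'widget', 'cl:t'])
# -> ['keyword:before', 'my-pkg:widget', 'cl:t']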
def fieldlist_index(node):
"""Find the index of a field list in a content node."""
for i, n in enumerate(node):
if isinstance(n, nodes.field_list):
return i
def get_content_node(node):
"""Search through and find the content node from a signature."""
for subnode in node:
if isinstance(subnode, addnodes.desc):
for subsubnode in subnode:
if isinstance(subsubnode, addnodes.desc_content):
return subsubnode
class SpecializerField(Field):
"""
"""
is_grouped = True
list_type = nodes.bullet_list
def __init__(self, name, names=(), label=None, rolename=None,
can_collapse=False):
Field.__init__(self, name, names, label, True, rolename)
self.can_collapse = can_collapse
def make_field(self, domain, items):
fieldname = nodes.field_name('', self.label)
listnode = self.list_type()
for content in items:
par = nodes.paragraph()
par += content
listnode += nodes.list_item('', par)
fieldbody = nodes.field_body('', listnode)
return nodes.field('', fieldname, fieldbody)
class SEXP(object):
def __init__(self, sexp, types=None, show_defaults=False):
if not isinstance(sexp, list):
self.sexp = _read(sexp)
else:
self.sexp = sexp
self.types = types
if self.types:
for i, type in enumerate(self.types):
type_node = addnodes.pending_xref(
'', refdomain='cl', reftype='type',
reftarget=type)
# type = " " + type
type_node += addnodes.desc_type(type, type)
self.sexp[i] = [self.sexp[i], type_node]
self.show_defaults = show_defaults
def as_parameterlist(self, function_name):
return self.render_parameterlist(prepend_node=function_name)
def render_parameterlist(self, signode=None, prepend_node=None, sexp=None):
desc_sexplist = desc_clparameterlist()
if prepend_node:
desc_sexplist.append(prepend_node)
if signode:
signode.append(desc_sexplist)
symbol = False
for atom in sexp or self.sexp:
if isinstance(atom, list):
if self.show_defaults:
symbol = self.render_parameterlist(signode=desc_sexplist, sexp=atom)
else:
symbol = self.render_atom(atom[0], desc_sexplist)
else:
symbol = self.render_atom(atom, desc_sexplist)
return desc_sexplist
def render_atom(self, token, signode, noemph=True):
"add syntax hi-lighting to interesting atoms"
if not isinstance(token, nodes.Element):
param = desc_clparameter(token, token)
if token.lower() in lambda_list_keywords:
param["lambda_keyword"] = True
if token.startswith(":"):
param["keyword"] = True
else:
param = token
signode.append(param)
class CLsExp(ObjectDescription):
doc_field_types = [
GroupedField('parameter', label=l_('Parameters'),
names=('param', 'parameter', 'arg', 'argument',
'keyword', 'kwparam')),
Field('returnvalue', label=l_('Returns'), has_arg=False,
names=('returns', 'return')),
]
option_spec = {
'nodoc': bool_option,
'noindex': bool_option,
'noinitargs': bool_option,
}
def handle_signature(self, sig, signode):
symbol_name = []
package = self.env.temp_data.get('cl:package')
objtype = self.get_signature_prefix(sig)
sig_split = sig.split(" ")
sig = sig_split[0]
signode.append(addnodes.desc_annotation(objtype, objtype))
lisp_args = ARGS[package].get(sig.upper(), "")
function_name = addnodes.desc_name(sig, sig)
if not lisp_args.strip() and self.objtype in ["function"]:
lisp_args = "()"
if lisp_args.strip():
types = []
if self.objtype in ["method"]:
types = self.arguments[0].split(' ')[1:]
sexp = SEXP(lisp_args,
types=types,
show_defaults=self.env.app.config.cl_show_defaults)
arg_list = sexp.as_parameterlist(function_name)
signode.append(arg_list)
else:
signode.append(function_name)
# Add Slots
slots = SLOTS[package].get(sig.upper())
if slots and "noinitargs" not in self.options:
# TODO add slot details if describing a class
for slot in slots:
initarg = slot.get(u'initarg')
if initarg and initarg.lower() != 'nil':
slotarg = addnodes.literal_emphasis(slot.get(u'name'), slot.get(u'name'))
slotsig = initarg.lower() + u' '
signode.append(addnodes.desc_optional(slotsig, slotsig, slotarg))
symbol_name = sig
if not symbol_name:
raise Exception("Unknown symbol type for signature %s" % sig)
record_use(package, symbol_name, self.objtype)
return objtype.strip(), symbol_name
def get_field_list(self, node):
"""Return the node's field list, if there isn't one then
create it first."""
# Add a field list if there isn't one
if not node[1][-1].children:
node[1][-1].append(nodes.field_list())
if not isinstance(node[1][-1][0], nodes.field_list):
node[1][-1].append(nodes.field_list())
return node[1][-1][-1]
def get_index_text(self, name, type):
return _('%s (Lisp %s)') % (name.lower().split(":")[-1], type)
def get_index_name(self, name, type):
return type + ":" + name
def get_signature_prefix(self, sig):
return self.objtype + ' '
def cl_symbol_name(self):
return self.names[0][1].upper()
def add_target_and_index(self, name, sig, signode):
# node target
type, name = name
if 'cl:package' in self.env.temp_data:
package = self.options.get(
'module', self.env.temp_data.get('cl:package'))
name = package.lower() + ":" + name
else:
return
indexname = self.get_index_name(name, type)
if name not in self.state.document.ids:
signode['names'].append(name)
signode['ids'].append(indexname)
signode['first'] = (not self.names)
self.state.document.note_explicit_target(signode)
inv = self.env.domaindata['cl']['symbols']
# TODO (RS) reenable this checking based on doc and type.
# if name in inv:
# self.state_machine.reporter.warning(
# 'duplicate symbol description of %s, ' % name +
# 'other instance in ' + self.env.doc2path(inv[name][0]),
# line=self.lineno)
if name in inv:
inv[name].append((self.env.docname, self.objtype))
else:
inv[name] = [(self.env.docname, self.objtype)]
indextext = self.get_index_text(name, type)
if indextext:
self.indexnode['entries'].append(
('single', indextext, indexname, '', None)
)
def before_content(self):
if "nodoc" in self.options:
return
package = self.env.temp_data.get('cl:package')
name = self.names[0][1]
if not package:
self.state_machine.reporter.warning("No package specified for symbol %s." %
name)
return
try:
string = self.cl_doc_string()
except KeyError:
string = ""
self.state_machine.reporter.warning("Can't find symbol %s:%s" %
(package, name))
if not string:
return
lines = string2lines(string) + ['']
self.content = StringList(lines) + self.content
def cl_doc_string(self, objtype=None):
"""Resolve a symbols doc string. Will raise KeyError if the symbol
can't be found.
"""
package = self.env.temp_data.get('cl:package')
name = self.cl_symbol_name()
objtype = objtype or self.objtype
possible_strings = DOC_STRINGS[package][name]
string = possible_strings.get(objtype, "")
return string
class CLGeneric(CLsExp):
option_spec = {
'nodoc': bool_option,
'noindex': bool_option,
'nospecializers': bool_option,
}
def run_add_specializers(self, result):
package = self.env.temp_data.get('cl:package')
name = self.cl_symbol_name()
specializers = METHODS[package].get(name, {}).keys()
if specializers:
spec = nodes.bullet_list()
for s in specializers:
spec_xref = specializer_xref(package + ":" + name, s,
self.state, package)
item = nodes.list_item('', spec_xref)
spec.append(item)
field_list = self.get_field_list(result)
field_list.append(
nodes.field('',
nodes.field_name('', "Specializers"),
nodes.field_body('', spec)))
return result
def run(self):
result = super(CLGeneric, self).run()
if "nospecializers" not in self.options:
self.run_add_specializers(result)
return result
class CLMethod(CLGeneric):
option_spec = {
'nodoc': bool_option,
'noindex': bool_option,
'noinherit': bool_option,
'nospecializers': bool_option,
'linkgeneric': bool_option,
}
doc_field_types = [
Field('specializer', label=l_('Specializer'), has_arg=False,
names=('specializer',)),
GroupedField('parameter', label=l_('Parameters'),
names=('param', 'parameter', 'arg', 'argument',
'keyword', 'kwparam')),
Field('returnvalue', label=l_('Returns'), has_arg=False,
names=('returns', 'return')),
]
def get_index_name(self, name, type):
package = self.env.temp_data.get('cl:package')
specializer = self.arguments
spec_args = qualify_sexp(package, specializer[0].split(" ")[1:])
specializer = " ".join(spec_args)
return type + ":" + name + "(" + specializer.lower() + ")"
def get_index_text(self, name, type):
specializer = self.arguments
spec_args = specializer[0].split(" ")[1:]
specializer = " ".join(spec_args)
return _('%s (%s) (Lisp %s)') % (name.lower().split(":")[-1],
specializer.lower(), type)
def add_target_and_index(self, name, sig, signode):
# node target
type, name = name
if 'cl:package' in self.env.temp_data:
package = self.options.get(
'module', self.env.temp_data.get('cl:package'))
name = package.lower() + ":" + name
else:
return
indexname = self.get_index_name(name, type)
if name not in self.state.document.ids:
signode['names'].append(name)
signode['ids'].append(indexname)
signode['first'] = (not self.names)
self.state.document.note_explicit_target(signode)
inv = self.env.domaindata['cl']['methods']
# TODO (RS) reenable this checking based on doc and type.
# if name in inv:
# self.state_machine.reporter.warning(
# 'duplicate symbol description of %s, ' % name +
# 'other instance in ' + self.env.doc2path(inv[name][0]),
# line=self.lineno)
sig = " ".join(qualify_sexp(package.lower(), sig.split(" ")[1:])) # trim method name
if name in inv:
inv[name][sig] = (self.env.docname, self.objtype)
else:
inv[name] = {sig: (self.env.docname, self.objtype)}
indextext = self.get_index_text(name, type)
if indextext:
self.indexnode['entries'].append(
('single', indextext, indexname, '', None)
)
def cl_doc_string(self):
"""Resolve a symbols doc string. Will raise KeyError if the symbol
can't be found.
"""
package = self.env.temp_data.get('cl:package')
name = self.cl_symbol_name()
specializer = self.arguments
spec = specializer[0].split(" ")[1:]
method_doc = METHODS[package].get(name, {})
key = tuple([parse_specializer_symbol(sym, package)
for sym in spec])
if key not in method_doc:
self.state_machine.reporter.warning("Can't find method %s:%s specializer %s, available specializers are %s" %
(package, name, key, method_doc.keys()))
doc = method_doc.get(key, "")
if doc:
return doc
if "noinherit" not in self.options:
return super(CLMethod, self).cl_doc_string("generic")
return ""
def run(self):
result = super(CLMethod, self).run()
field_list = self.get_field_list(result)
package = self.env.temp_data.get('cl:package')
if "linkgeneric" in self.options:
# TODO (RS) this will probably be removed in the future.
spec = specializer(self.cl_symbol_name(),
self.arguments[0].split()[1:],
self.state,
package=package)
field_list.append(
nodes.field('',
nodes.field_name('', "Specializer"),
nodes.field_body('', spec)))
return result
class CLCurrentPackage(Directive):
"""This directive is just to tell Sphinx that we're documenting stuff
in namespace foo.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {}
def run(self):
env = self.state.document.settings.env
env.temp_data['cl:package'] = self.arguments[0].upper()
#index_package(self.arguments[0].upper())
return []
class CLXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
if not has_explicit_title:
target = target.lstrip('~') # only has a meaning for the title
# if the first character is a tilde, don't display the package
if title[0:1] == '~':
symbol = title[1:].split(':')
package = symbol[0]
title = symbol[-1]
if target[0] == ":":
title = ":" + title
return title, target
class CLDomain(Domain):
"""CL language domain."""
name = 'cl'
label = 'Common Lisp'
object_types = {
'package': ObjType(l_('package'), 'package'),
'function': ObjType(l_('function'), 'function'),
'macro': ObjType(l_('macro'), 'macro'),
'variable': ObjType(l_('variable'), 'variable'),
'type': ObjType(l_('type'), 'type'),
'generic': ObjType(l_('generic'), 'generic'),
'method': ObjType(l_('method'), 'method'),
}
directives = {
'package': CLCurrentPackage,
'function': CLsExp,
'generic': CLGeneric,
'macro': CLsExp,
'variable': CLsExp,
'type': CLsExp,
'method': CLMethod,
}
roles = {
'symbol': CLXRefRole(),
'function': CLXRefRole(),
'generic': CLXRefRole(),
'macro': CLXRefRole(),
'variable': CLXRefRole(),
'type': CLXRefRole(),
'method': CLXRefRole(),
}
initial_data = {
'symbols': {},
'methods': {},
}
def clear_doc(self, docname):
for fullname, docs in self.data['symbols'].items():
for (fn, _) in docs:
if fn == docname:
del self.data['symbols'][fullname]
def find_obj(self, env, name):
"""Find a Lisp symbol for "name", perhaps using the given package
Return a list of (name, object entry) tuples.
"""
symbols = self.data['symbols']
name = name.lower()
if ":" in name:
if name in symbols:
return [(name, symbols[name])]
else:
def filter_symbols(symbol):
symbol = symbol[0]
if name == symbol:
return True
if ":" in symbol:
symbol = symbol.split(":")[1]
if name == symbol:
return True
return False
return filter(filter_symbols, symbols.items())
def find_method(self, env, name, node):
"""Find a Lisp symbol for "name", perhaps using the given package
Return a list of (name, object entry) tuples.
"""
methods = self.data['methods']
name = name.lower()
sexp = name.split(" ")
generic = sexp[0]
specializer = " ".join(sexp[1:])
if generic in methods:
if specializer in methods[generic]:
return [methods[generic][specializer]]
else:
env.warn_node('can\'t find method %s' % (name), node)
else:
env.warn_node('can\'t find generic %s' % (name), node)
def resolve_xref(self, env, fromdocname, builder,
typ, target, node, contnode):
if " " in target:
matches = self.find_method(env, target.upper(), node)
else:
matches = self.find_obj(env, target.upper())
if not matches:
return None
elif len(matches) > 1:
env.warn_node(
'more than one target found for cross-reference '
'%r: %s' % (target, ', '.join(match[0] for match in matches)),
node)
# TODO (RS) this just chooses the first symbol, instead every
# symbol should be presented.
if " " in target:
sexp = target.split(" ")
generic = sexp[0].lower()
specializer = " ".join(sexp[1:])
name = generic
filename = matches[0][0] # the first filename
link = "method" + ":" + generic + "(" + specializer + ")"
else:
name = matches[0][0] # the symbol name
filename = matches[0][1][0][0] # the first filename
type = matches[0][1][0][1] # the first type
link = type + ":" + name
return make_refnode(builder, fromdocname, filename,
link, contnode, name)
def get_symbols(self):
for refname, docs in self.data['symbols'].iteritems():
for (docname, type) in docs:
yield (refname, refname, type, docname, refname, 1)
def save_cldomain_output(output):
"""Save a copy of the clgit output for debugging."""
fd, path = tempfile.mkstemp('.log', 'cldomain-err-')
os.write(fd, output.encode('utf-8'))
os.close(fd)
return path
def index_packages(systems, system_paths, packages, quicklisp, lisps, cl_debug):
"""Call an external lisp program that will return a dictionary of doc
strings for all public symbols.
"""
cl_launch_exe = [which("cl-launch")[0]]
cl_launch_command = cl_launch_args(lisps)
cldomain_args = ["--"]
for package in packages:
cldomain_args.extend(["--package", package])
for system in systems:
cldomain_args.extend(["--system", system])
for system_path in system_paths:
cldomain_args.extend(["--path", system_path])
env = os.environ.copy()
env.update({"CLDOMAIN": path.abspath(path.dirname(__file__)) + "/",
"QUICKLISP": quicklisp})
raw_output = subprocess.check_output(cl_launch_exe
+ cl_launch_command
+ cldomain_args,
env=env)
output = "\n".join([line for line in raw_output.split("\n")
if not line.startswith(";")])
try:
lisp_data = json.loads(output)
if cl_debug:
pprint.pprint(lisp_data)
except:
dump_path = save_cldomain_output(raw_output)
error = sys.stderr
print >>error, red('An error occurred with the json output from cldomain\'s'
' lisp inspector, this has been dumped to %s if you '
'intend on submitting a bug please include this file '
'with the sphinx error log.' % dump_path)
raise
for k, v in lisp_data.items():
symbol_name = k.split(':')
package, name = symbol_name[0], symbol_name[-1]
# extract doc strings
DOC_STRINGS[package][name] = {}
for type in ALL_TYPES:
if type not in v:
continue
# XXX This isn't the best, the objtype is generic but the
# docstring will be under genericFunction because of the JSON
# encoder and changing the directive name doesn't seem to help
# either.
if type == "genericFunction":
cl_type = "generic"
else:
cl_type = type
# enable symbol references for symbols
DOC_STRINGS[package][name][cl_type] = v[type]
# extract methods
if "methods" in v:
def parse_method(method):
sexp = []
for atom in json.loads(method):
if atom.startswith("("):
eql = _read(atom)
sexp.append(eql[-1])
else:
sexp.append(atom)
return tuple(sexp)
def parse_doc(doc):
if doc is None:
doc = ""
return doc
methods = dict([(parse_method(method), parse_doc(doc))
for method, doc in v["methods"].items()])
METHODS[package][name] = methods
# extract slots
if "slots" in v:
SLOTS[package][name] = v["slots"]
def lower_symbols(text):
if '"' in text:
return text
symbol_name = text.split(':')
if len(symbol_name) > 1:
spackage, symbol = symbol_name[0], symbol_name[-1]
else:
spackage = ''
symbol = ''
if spackage.upper() in packages:
return symbol.lower()
return text.lower()
# extract arguments
packages = map(operator.methodcaller('upper'), packages)
for k, v in lisp_data.items():
spackage, symbol = k.split(':')
if not v.get("arguments"):
pass
elif v["arguments"] == "NIL":
ARGS[spackage][symbol] = ""
else:
v_arg = v["arguments"].replace('(', ' ( ').replace(')', ' ) ')
ARGS[spackage][symbol] = " ".join(map(lower_symbols,
v_arg.split(" ")))
def load_packages(app):
packages = []
systems = []
system_paths = []
if app.config.cl_packages:
app.info("DEPRECATED: The cl_packages variable has been "
"replaced by cl_systems and will be removed in the future.")
for package, system_path in app.config.cl_packages.iteritems():
packages.append(package.upper())
systems.append(package)
system_paths.append(system_path)
if app.config.cl_systems:
for system in app.config.cl_systems:
systems.append(system['name'])
if 'path' in system:
system_paths.append(system['path'])
if 'packages' in system:
for package in system['packages']:
packages.append(package.upper())
else:
packages.append(system['name'].upper())
if not packages:
app.warn("No CL packages specified.")
return
app.info("Collecting Lisp docstrings from %s..." % ', '.join(str(x) for x in systems))
index_packages(systems,
system_paths,
packages,
app.config.cl_quicklisp,
app.config.cl_lisps,
app.config.cl_debug)
def uppercase_symbols(app, docname, source):
"""For each line in a list replace all uppercase symbols with a
sphinx references"""
for i, line in enumerate(source):
source[i] = re.sub(upper_symbols,
":cl:symbol:`~\g<1>`\g<2>", line)
def list_unused_symbols(app, exception):
if exception:
return
# TODO (RS) this initial implementation will not be able to detect
# if each method specialisation has been used.
for p, sym_doc in DOC_STRINGS.items():
for s, docs in sym_doc.items():
for objtype in docs.keys():
if s in USED_SYMBOLS[p]:
if objtype == "genericFunction":
objtype = "generic"
if objtype not in USED_SYMBOLS[p][s]:
app.warn("Unused symbol doc %s:%s type %s" %
(p, s, objtype))
else:
app.warn("Unused symbol doc %s:%s type %s" %
(p, s, objtype))
def add_node(class_name, node, visit, depart=None):
"""Register a node's visitor functions with a class, if is available.
"""
def import_class(cl):
d = cl.rfind(".")
classname = cl[d+1:len(cl)]
m = __import__(cl[0:d], globals(), locals(), [classname])
return getattr(m, classname)
try:
translator = import_class(class_name)
except (ImportError, AttributeError):
return
setattr(translator, 'visit_' + node.__name__, visit)
if depart:
setattr(translator, 'depart_'+node.__name__, depart)
add_node('sphinx_bootstrap_theme.BootstrapTranslator',
desc_clparameterlist,
v_clparameterlist, d_clparameterlist)
add_node('sphinx_bootstrap_theme.BootstrapTranslator',
desc_clparameter,
v_html_clparameter, d_html_clparameter)
add_node('sphinx_bootstrap_theme.BootstrapTranslator',
addnodes.desc_type,
v_bs_html_desc_type, d_bs_html_desc_type)
add_node('sphinx.writers.html.HTMLTranslator',
addnodes.desc_type,
v_html_desc_type)
def setup(app):
app.add_domain(CLDomain)
app.add_node(desc_clparameterlist,
html=(v_clparameterlist, d_clparameterlist),
latex=(v_latex_clparameterlist, d_latex_clparameterlist),
texinfo=(v_clparameterlist, d_clparameterlist),
text=(v_clparameterlist, d_clparameterlist))
app.add_node(desc_clparameter,
html=(v_html_clparameter, d_html_clparameter),
latex=(v_latex_clparameter, d_latex_clparameter),
texinfo=(v_texinfo_clparameter, d_clparameter),
text=(v_text_clparameter, d_clparameter))
app.add_config_value('cl_packages', {}, 'env')
app.add_config_value('cl_systems', [], 'env')
app.add_config_value('cl_quicklisp', path.expandvars("$HOME/quicklisp"), 'env')
app.add_config_value('cl_show_defaults', False, True)
app.add_config_value('cl_lisps', None, 'env')
app.add_config_value('cl_debug', False, 'env')
app.connect('builder-inited', load_packages)
app.connect('build-finished', list_unused_symbols)
#app.connect('source-read', uppercase_symbols)
def which(name, flags=os.X_OK):
"""https://twistedmatrix.com/trac/browser/tags/releases/twisted-8.2.0/twisted/python/procutils.py"""
result = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ.get('PATH', None)
if path is None:
return []
for p in os.environ.get('PATH', '').split(os.pathsep):
p = os.path.join(p, name)
if os.access(p, flags):
result.append(p)
for e in exts:
pext = p + e
if os.access(pext, flags):
result.append(pext)
return result
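# A minimal usage sketch for ``which`` (the paths shown are hypothetical):
#
#     >>> which("cl-launch")          # doctest: +SKIP
#     ['/usr/local/bin/cl-launch']
#     >>> which("no-such-binary")     # doctest: +SKIP
#     []
#
# An empty result means the executable is not on PATH, so callers such as
# index_packages(), which takes element [0], would raise IndexError; checking
# the list first is safer.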
def cl_launch_args(lisps=None,
package='sphinxcontrib.cldomain',
main_function="sphinxcontrib.cldomain:main"):
quicklisp = """
#-quicklisp
(let ((quicklisp-init (merge-pathnames (make-pathname :name "setup"
:type "lisp")
(concatenate 'string (uiop/os:getenv "QUICKLISP")
"/"))))
(if (probe-file quicklisp-init)
(load quicklisp-init)
(error "Can't Find Quicklisp at ~a~%" quicklisp-init)))
"""
system = """
(push (pathname (concatenate 'string (uiop/os:getenv \"CLDOMAIN\") \"/\"))
asdf:*central-registry*)
"""
quickload = """
(let ((*standard-output* *error-output*))
(quicklisp:quickload '%s))
""" % package
args = []
if lisps:
args.extend(["--lisp", lisps])
args.extend(["--init", quicklisp,
"--init", system,
"--init", "(asdf:initialize-source-registry)",
"--init", "(asdf:require-system :quicklisp)",
"--init", quickload,
"--init", "(%s)" % main_function])
return args
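# Sketch of the argument list this builds (assuming lisps="sbcl"); the --init
# forms run in order: load Quicklisp, register the cldomain system, then
# quickload the package and invoke the entry point:
#
#     cl_launch_args(lisps="sbcl")
#     # => ['--lisp', 'sbcl',
#     #     '--init', <quicklisp form>, '--init', <system form>,
#     #     '--init', '(asdf:initialize-source-registry)',
#     #     '--init', '(asdf:require-system :quicklisp)',
#     #     '--init', <quickload form>,
#     #     '--init', '(sphinxcontrib.cldomain:main)']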
|
russell/sphinxcontrib-cldomain
|
sphinxcontrib/cldomain.py
|
Python
|
gpl-3.0
| 38,578
|
[
"VisIt"
] |
e5efe447587e3da61f8246f47c1f66119b8ef65f2d014f355ea386648278b300
|
from sklearn.linear_model import LinearRegression as _sklearn_LinearRegression
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn import preprocessing
from sklearn.base import TransformerMixin
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C, RationalQuadratic as RQ
from sklearn.base import RegressorMixin, BaseEstimator
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import r2_score
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_regression, mutual_info_regression
from sklearn.exceptions import DataConversionWarning
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
from .selectors import SelectNAndKBest
from . import feature_concat
from . import LinearAndGaussianProcessRegression, GaussianProcessRegressor_, ignore_warnings
import numpy, pandas
import scipy.stats
import warnings
import contextlib
from sklearn.multioutput import MultiOutputRegressor as MultiOutputRegressor_
class CrossValMixin:
def cross_val_scores(self, X, Y, cv=3):
p = self.cross_val_predict(X, Y, cv=cv)
return pandas.Series(
r2_score(Y, p, sample_weight=None, multioutput='raw_values'),
index=Y.columns
)
def cross_val_predict(self, X, Y, cv=3):
if isinstance(Y, pandas.DataFrame):
self.Y_columns = Y.columns
Yix = Y.index
elif isinstance(Y, pandas.Series):
self.Y_columns = [Y.name]
Yix = Y.index
else:
self.Y_columns = ["Untitled" * Y.shape[1]]
Yix = pandas.RangeIndex(Y.shape[0])
with ignore_warnings(DataConversionWarning):
p = cross_val_predict(self, X, Y, cv=cv)
return pandas.DataFrame(p, columns=self.Y_columns, index=Yix)
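# A minimal sketch of how CrossValMixin is used (synthetic data; all names
# here are illustrative only):
#
#     X = pandas.DataFrame(numpy.random.rand(50, 4), columns=list('abcd'))
#     Y = pandas.DataFrame(numpy.random.rand(50, 2), columns=['y1', 'y2'])
#     model = MultiOutputRegressor(GaussianProcessRegressor())  # defined below
#     scores = model.cross_val_scores(X, Y, cv=3)  # pandas.Series indexed y1, y2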
class MultiOutputRegressor(
MultiOutputRegressor_,
CrossValMixin
):
pass
class SingleTargetRegression(
BaseEstimator,
RegressorMixin,
):
def __init__(self, core_features=None, keep_other_features=3, detrend=True, expected_features=None):
"""
Parameters
----------
core_features
feature columns to definitely keep for both LR and GPR
"""
self.core_features = core_features
self.keep_other_features = keep_other_features
self.lr = LinearRegression()
self.gpr = GaussianProcessRegressor_(n_restarts_optimizer=9)
self.y_residual = None
self.kernel_generator = lambda dims: C() * RBF([1.0] * dims)
self.use_linear = detrend
self.expected_features = expected_features
def _feature_selection(self, X, y=None):
"""
Parameters
----------
X : pandas.DataFrame
y : ndarray
If given, the SelectKBest feature selector will be re-fit to find the best features. If not given,
then the previously fit SelectKBest will be used; if it has never been fit, an error is raised.
Returns
-------
pandas.DataFrame
Contains all the core features plus the K best other features.
"""
if not isinstance(X, pandas.DataFrame):
#raise TypeError('must use pandas.DataFrame for X')
X = pandas.DataFrame(X, columns=self.expected_features)
if self.core_features is None:
return X
X_core = X.loc[:,self.core_features]
X_other = X.loc[:, X.columns.difference(self.core_features)]
if X_other.shape[1] <= self.keep_other_features:
return X
# If self.keep_other_features is zero, there is no feature selecting to do and we return only the core.
if self.keep_other_features == 0:
return X_core
if y is not None:
self.feature_selector = SelectKBest(mutual_info_regression, k=self.keep_other_features).fit(X_other, y)
X_other = pandas.DataFrame(
self.feature_selector.transform(X_other),
columns=X_other.columns[self.feature_selector.get_support()],
index=X_other.index,
)
return pandas.concat([X_core, X_other], axis=1)
def fit(self, X, y):
"""
Fit the linear and Gaussian process models.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data
y : numpy array of shape [n_samples, n_targets]
Target values.
Returns
-------
self : returns an instance of self.
"""
# print("META FIT on",len(X))
if not isinstance(X, pandas.DataFrame):
#raise TypeError('must use pandas.DataFrame for X')
X = pandas.DataFrame(X, columns=self.expected_features)
with ignore_warnings(DataConversionWarning):
if isinstance(y, pandas.DataFrame):
self.Y_columns = y.columns
elif isinstance(y, pandas.Series):
self.Y_columns = [y.name]
else:
self.Y_columns = None
X_core_plus = self._feature_selection(X, y)
if self.use_linear:
self.lr.fit(X_core_plus, y)
self.y_residual = y - self.lr.predict(X_core_plus)
else:
self.y_residual = y
dims = X_core_plus.shape[1]
self.gpr.kernel = self.kernel_generator(dims)
self.gpr.fit(X_core_plus, self.y_residual)
# print(self.y_residual.values[0])
return self
def predict(self, X, return_std=False, return_cov=False):
"""Predict using the model
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
Returns
-------
C : array or pandas.DataFrame, shape = (n_samples,) or (n_samples, n_targets)
    Returns predicted values.
"""
if not isinstance(X, pandas.DataFrame):
#raise TypeError('must use pandas.DataFrame for X')
X = pandas.DataFrame(X, columns=self.expected_features)
X_core_plus = self._feature_selection(X)
if self.use_linear:
y_hat_lr = self.lr.predict(X=X_core_plus)
else:
y_hat_lr = 0
if return_std:
y_hat_gpr, y_hat_std = self.gpr.predict(X_core_plus, return_std=True)
if self.Y_columns is not None:
y_result = pandas.DataFrame(
y_hat_lr + y_hat_gpr,
columns=self.Y_columns,
index=X.index,
)
else:
y_result = y_hat_lr + y_hat_gpr
return y_result, y_hat_std
else:
y_hat_gpr = self.gpr.predict(X_core_plus)
if self.Y_columns is not None:
y_result = pandas.DataFrame(
y_hat_lr + y_hat_gpr,
columns=self.Y_columns,
index=X.index,
)
else:
y_result = y_hat_lr + y_hat_gpr
return y_result
def cross_val_scores(self, X, Y, cv=3):
p = self.cross_val_predict(X, Y, cv=cv)
return pandas.Series(
r2_score(Y, p, sample_weight=None, multioutput='raw_values'),
index=Y.columns
)
def cross_val_predict(self, X, y, cv=3):
with ignore_warnings(DataConversionWarning):
X_core_plus = self._feature_selection(X, y)
total = cross_val_predict(self, X_core_plus, y, cv=cv)
return pandas.DataFrame(
total,
index=y.index,
columns=y.columns,
)
def SingleTargetRegressions(*args, **kwargs):
return MultiOutputRegressor(GaussianProcessRegressor_(*args, **kwargs))
class ChainedTargetRegression(
BaseEstimator,
RegressorMixin,
CrossValMixin,
):
def __init__(self, keep_other_features=3, step2_cv_folds=5, randomize_chain=True):
"""
Parameters
----------
core_features
feature columns to definitely keep for both LR and GPR
"""
self.keep_other_features = keep_other_features
self.step1 = GaussianProcessRegressor_()
self.step2_cv_folds = step2_cv_folds
self.randomize_chain = randomize_chain
def fit(self, X, Y):
"""
Fit one Gaussian process per target, chained on earlier targets.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data
y : numpy array of shape [n_samples, n_targets]
Target values.
Returns
-------
self : returns an instance of self.
"""
with ignore_warnings(DataConversionWarning):
if isinstance(Y, pandas.DataFrame):
self.Y_columns = Y.columns
Y_ = Y.values
elif isinstance(Y, pandas.Series):
self.Y_columns = [Y.name]
Y_ = Y.values.reshape(-1,1)
else:
self.Y_columns = ["Untitled" * Y.shape[1]]
Y_ = Y
Yhat = pandas.DataFrame(
index=X.index,
columns=self.Y_columns,
)
self.steps = []
self._chain_order = numpy.arange(Y.shape[1])
if self.randomize_chain is not None and self.randomize_chain is not False:
if self.randomize_chain is not True:
numpy.random.seed(self.randomize_chain)
numpy.random.shuffle(self._chain_order)
for meta_n in range(Y.shape[1]):
n = self._chain_order[meta_n]
self.steps.append(
make_pipeline(
SelectNAndKBest(n=X.shape[1], k=self.keep_other_features),
GaussianProcessRegressor(),
).fit(
feature_concat(X, Yhat.iloc[:,:meta_n]),
Y_[:,n]
)
)
Yhat.iloc[:, meta_n] = cross_val_predict(
self.steps[-1],
feature_concat(X, Yhat.iloc[:,:meta_n]),
Y_[:,n],
cv=self.step2_cv_folds,
)
return self
def predict(self, X, return_std=False, return_cov=False):
"""Predict using the model
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
Returns
-------
C : array or pandas.DataFrame, shape = (n_samples,) or (n_samples, n_targets)
    Returns predicted values.
"""
if isinstance(X, (pandas.DataFrame, pandas.Series)):
x_ix = X.index
else:
x_ix = pandas.RangeIndex(X.shape[0])
Yhat = pandas.DataFrame(
index=x_ix,
columns=self.Y_columns,
)
if return_std:
Ystd = pandas.DataFrame(
index=x_ix,
columns=self.Y_columns,
)
for n, col in enumerate(self.Y_columns):
y1, y2 = self.steps[n].predict(
feature_concat(X, Yhat.iloc[:, :n]),
return_std=True
)
Yhat.iloc[:, n] = y1
Ystd.iloc[:, n] = y2
return Yhat, Ystd
else:
for n, col in enumerate(self.Y_columns):
y1 = self.steps[n].predict(
feature_concat(X, Yhat.iloc[:, :n]),
)
Yhat.iloc[:, n] = y1
return Yhat
def cross_val_scores(self, X, Y, cv=3):
p = self.cross_val_predicts(X, Y, cv=cv)
return pandas.Series(
r2_score(Y, p, sample_weight=None, multioutput='raw_values'),
index=Y.columns
)
def cross_val_predicts(self, X, Y, cv=3, alt_y=None):
with ignore_warnings(DataConversionWarning):
p = cross_val_predict(self, X, Y, cv=cv)
return pandas.DataFrame(p, columns=Y.columns, index=Y.index)
def score(self, X, y, sample_weight=None):
"""Returns the coefficient of determination R^2 of the prediction.
The coefficient R^2 is defined as (1 - u/v), where u is the residual
sum of squares ((y_true - y_pred) ** 2).sum() and v is the total
sum of squares ((y_true - y_true.mean()) ** 2).sum().
The best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get an R^2 score of 0.0.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True values for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
R^2 of self.predict(X) wrt. y.
"""
return r2_score(y, self.predict(X), sample_weight=sample_weight,
multioutput='raw_values').mean()
class EnsembleRegressorChains(
BaseEstimator,
RegressorMixin,
CrossValMixin,
):
def __init__(self, keep_other_features=3, step2_cv_folds=5, replication=10):
self.replication = replication
self.keep_other_features = keep_other_features
self.step2_cv_folds = step2_cv_folds
self.ensemble = [
ChainedTargetRegression(
keep_other_features=keep_other_features,
step2_cv_folds=step2_cv_folds,
randomize_chain=n,
)
for n in range(self.replication)
]
def fit(self, X, Y):
for c in self.ensemble:
c.fit(X,Y)
return self
def predict(self, X):
result = self.ensemble[0].predict(X)
for c in self.ensemble[1:]:
result += c.predict(X)
result /= len(self.ensemble)
return result
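# The ensemble simply averages chains fit with different chain orders
# (randomize_chain = 0 .. replication-1), which damps the sensitivity of any
# single chain to its target ordering:
#
#     ens = EnsembleRegressorChains(replication=10)
#     ens.fit(X, Y)
#     Yhat = ens.predict(X)   # mean of the 10 chained predictions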
class StackedSingleTargetRegression(
BaseEstimator,
RegressorMixin,
CrossValMixin,
):
def __init__(
self,
keep_other_features=3,
step2_cv_folds=5,
):
"""
Parameters
----------
keep_other_features : int
The number of other (derived) feature columns to keep. Keeping this
number small helps prevent overfitting problems if the number of
output features is large.
step2_cv_folds : int
Number of CV folds used to generate the step 1 predictions that
feed step 2.
"""
self.keep_other_features = keep_other_features
self.step2_cv_folds = step2_cv_folds
def fit(self, X, Y):
"""
Fit the two-step stacked Gaussian process models.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data
y : numpy array of shape [n_samples, n_targets]
Target values.
Returns
-------
self : returns an instance of self.
"""
with ignore_warnings(DataConversionWarning):
self.step1 = MultiOutputRegressor(GaussianProcessRegressor())
Y_cv = cross_val_predict(self.step1, X, Y, cv=self.step2_cv_folds)
self.step1.fit(X, Y)
self.step2 = MultiOutputRegressor(
make_pipeline(
SelectNAndKBest(n=X.shape[1], k=self.keep_other_features),
GaussianProcessRegressor(),
)
)
self.step2.fit(feature_concat(X, Y_cv), Y)
if isinstance(Y, pandas.DataFrame):
self.Y_columns = Y.columns
elif isinstance(Y, pandas.Series):
self.Y_columns = [Y.name]  # keep a list, matching the DataFrame branch
else:
self.Y_columns = None
return self
def predict(self, X, return_std=False, return_cov=False):
"""Predict using the model
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
Returns
-------
C : array or pandas.DataFrame, shape = (n_samples,) or (n_samples, n_targets)
    Returns predicted values.
"""
Yhat1 = self.step1.predict(X)
Yhat2 = self.step2.predict(feature_concat(X, Yhat1))
# for n, col in enumerate(self.Y_columns):
# temp = self.step2[n].predict(
# pandas.concat([X, Yhat1], axis=1),
# )
# Yhat2.iloc[:, n] = temp
return Yhat2
def cross_val_scores(self, X, Y, cv=3):
p = self.cross_val_predicts(X, Y, cv=cv)
return pandas.Series(
r2_score(Y, p, sample_weight=None, multioutput='raw_values'),
index=Y.columns
)
def cross_val_predicts(self, X, Y, cv=3, alt_y=None):
if not isinstance(X, pandas.DataFrame):
raise TypeError('must use pandas.DataFrame for X')
if not isinstance(Y, pandas.DataFrame):
raise TypeError('must use pandas.DataFrame for Y')
with ignore_warnings(DataConversionWarning):
p = cross_val_predict(self, X, Y, cv=cv)
return pandas.DataFrame(p, columns=Y.columns, index=Y.index)
def score(self, X, y, sample_weight=None):
"""Returns the coefficient of determination R^2 of the prediction.
The coefficient R^2 is defined as (1 - u/v), where u is the residual
sum of squares ((y_true - y_pred) ** 2).sum() and v is the total
sum of squares ((y_true - y_true.mean()) ** 2).sum().
The best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get an R^2 score of 0.0.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True values for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
R^2 of self.predict(X) wrt. y.
"""
return r2_score(y, self.predict(X), sample_weight=sample_weight,
multioutput='raw_values').mean()
class DetrendMixin:
def detrend_fit(self, X, Y):
self._lr = LinearRegression()
self._lr.fit(X, Y)
residual = Y - self._lr.predict(X)
return residual
def detrend_predict(self, X):
Yhat1 = self._lr.predict(X)
return Yhat1
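# The detrend pattern used by the mixin classes below: fit a linear trend,
# train the wrapped model on the residuals, and add the trend back at
# predict time. Equivalent in spirit to:
#
#     lr = LinearRegression().fit(X, Y)
#     residual_model.fit(X, Y - lr.predict(X))
#     Yhat = lr.predict(X) + residual_model.predict(X)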
class DetrendedStackedSingleTargetRegression(
StackedSingleTargetRegression,
DetrendMixin
):
def fit(self, X, Y):
return super().fit(X, self.detrend_fit(X,Y))
def predict(self, X, return_std=False, return_cov=False):
return self.detrend_predict(X) + super().predict(X)
class DetrendedChainedTargetRegression(
ChainedTargetRegression,
DetrendMixin
):
def fit(self, X, Y):
return super().fit(X, self.detrend_fit(X,Y))
def predict(self, X, return_std=False, return_cov=False):
return self.detrend_predict(X) + super().predict(X)
class DetrendedEnsembleRegressorChains(
EnsembleRegressorChains,
DetrendMixin
):
def fit(self, X, Y):
return super().fit(X, self.detrend_fit(X,Y))
def predict(self, X, return_std=False, return_cov=False):
return self.detrend_predict(X) + super().predict(X)
|
jpn--/pines
|
pines/gpr/multitarget.py
|
Python
|
mit
| 16,391
|
[
"Gaussian"
] |
74e5526ef89c2a5b913980448293fc88db233b2c1285966b2336974097715bd2
|
__author__ = 'yurib'
import networkx as nx
import numpy as np
import random
from scipy.special import expit
from commons import WEIGHT, BIAS
class MatrixNN(object):
def __init__(self, layers, seed=None, zeros=False):
if seed:
np.random.seed(seed)
matrix = np.zeros if zeros else np.random.random
self.weight = [matrix((ins, outs)) for ins, outs in zip(layers[:-1], layers[1:])]
self.bias = [matrix((1, cols)) for cols in layers[1:]]
self.layers = layers
def activate(self, ins):
assert len(ins) == self.layers[0]
outs = self.sigmoid(np.array(ins[:]).reshape((1, self.layers[0])))
for w, b in zip(self.weight, self.bias):
outs = self.sigmoid(outs.dot(w) + b)
return outs
sigmoid = np.vectorize(expit)
def __repr__(self):
return 'layers:\n%s\nweights:\n%s\nbiases:\n%s\n' % (self.layers,self.weight,self.bias)
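# Minimal MatrixNN usage sketch (layer sizes are illustrative): a 2-3-1
# network maps 2 inputs through one hidden layer to a single sigmoid output.
#
#     net = MatrixNN([2, 3, 1], seed=1)
#     out = net.activate([0.5, -0.2])   # 1x1 array of values in (0, 1)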
class Neuron(object):
sigmoid = expit
def __init__(self,id,g,activation=sigmoid):
self.activation = activation
self.graph = g
self.output = None
self.id = id
def activate(self):
ins = self.graph.in_edges(self)
if not ins:
return
vs = [s.output for s,_ in ins]
ws = [self.graph[s][t][WEIGHT] for s,t in ins]
#bs = [self.graph[s][t][BIAS] for s,t in ins]
# todo: fix - bias is a node property!
bs = [0]*len(ins)
self.output = self.activation(sum(v*w+b for v,w,b in zip(vs,ws,bs)))
return self.output
class NN(object):
def __init__(self, g, inputs, outputs):
self.graph = g.copy()
# map graph nodes to neurons
nodes = {n:Neuron(n,self.graph) for n in self.graph.nodes()}
nx.relabel_nodes(self.graph,nodes,copy=False)
# remember input and output nodes
self.inputs = [nodes[n] for n in inputs]
self.outputs = [nodes[n] for n in outputs]
# assign random weights and bias if they don't exist
for s,t in self.graph.edges():
if WEIGHT not in self.graph[s][t]:
self.graph[s][t][WEIGHT] = random.random()
if BIAS not in self.graph[s][t]:
self.graph[s][t][BIAS] = random.random()
def activate(self,inputs):
assert len(self.inputs) == len(inputs)
topo_nodes = nx.topological_sort(self.graph)
for node, input in zip(self.inputs,inputs):
node.output = input
for node in topo_nodes:
node.activate()
return [n.output for n in self.outputs]
if __name__ == '__main__':
g = nx.DiGraph()
g.add_nodes_from(range(5))
g.add_edges_from([(0,3),(1,3),(2,4)])
net = NN(g,[0,1,2],[3,4])
print net
print 'output:\n', net.activate([5,1,2])
|
yuribak/pyneat
|
net/net.py
|
Python
|
gpl-2.0
| 2,820
|
[
"NEURON"
] |
d8fda7c871b27906d904513a7c6384bdf8ca029095a4f9b91c1ed70a54742418
|
# -*- coding: utf-8 -*-
#!/usr/bin/python
"""Test of fix for bug 570566 (Orca goes silent when navigating to
uneditable text from an ARIA widget) using Firefox."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
########################################################################
# We wait for the focus to be on the Firefox window as well as for focus
# to move to the "Editor Test" frame.
#
sequence.append(WaitForWindowActivate(utils.firefoxFrameNames, None))
########################################################################
# Load the editor test demo.
#
sequence.append(KeyComboAction("<Control>l"))
sequence.append(WaitForFocus(acc_role=pyatspi.ROLE_ENTRY))
sequence.append(TypeAction(utils.DojoNightlyURLPrefix + "editor/test_Editor.html"))
sequence.append(KeyComboAction("Return"))
sequence.append(WaitForDocLoad())
sequence.append(WaitForFocus("Editor Test",
acc_role=pyatspi.ROLE_DOCUMENT_FRAME))
########################################################################
# Extra loading time.
#
sequence.append(PauseAction(10000))
########################################################################
# Up Arrow to the heading above, and continue Up Arrowing to the top of
# the page.
#
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"1. Up Arrow",
["BRAILLE LINE: 'ToolBar'",
" VISIBLE: 'ToolBar', cursor=1",
"SPEECH OUTPUT: 'tool bar'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"2. Up Arrow",
["BRAILLE LINE: 'No plugins, initially empty h2'",
" VISIBLE: 'No plugins, initially empty h2', cursor=1",
"SPEECH OUTPUT: 'No plugins, initially empty heading level 2'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"3. Up Arrow",
["BRAILLE LINE: 'Editor + Plugins Test h1'",
" VISIBLE: 'Editor + Plugins Test h1', cursor=1",
"SPEECH OUTPUT: 'Editor + Plugins Test heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"4. Up Arrow",
["BUG? - The braille is not ideal, nor does it jive with the speech.",
"BRAILLE LINE: '<x> CheckBox<x> CheckBox<x> CheckBox<x> CheckBox<x> CheckBox'",
" VISIBLE: 'CheckBox<x> CheckBox<x> CheckBox', cursor=1",
"SPEECH OUTPUT: 'blank'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"5. Up Arrow",
["BRAILLE LINE: 'Focus:<x> CheckBox Value:<x> CheckBox Change:<x> CheckBox Blur:<x> CheckBox Disabled:<x> CheckBox'",
" VISIBLE: 'Focus:<x> CheckBox Value:<x> Che', cursor=1",
"SPEECH OUTPUT: 'Focus: check box checked grayed Value: check box checked grayed Change: check box checked grayed Blur: check box checked grayed Disabled: check box checked grayed ",
"'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"6. Up Arrow",
["BRAILLE LINE: 'Automated Test - all check boxes should be checked'",
" VISIBLE: 'Automated Test - all check boxes', cursor=1",
"SPEECH OUTPUT: 'Automated Test - all check boxes should be checked'"]))
########################################################################
# Close the demo
#
sequence.append(KeyComboAction("<Control>l"))
sequence.append(WaitForFocus(acc_role=pyatspi.ROLE_ENTRY))
sequence.append(TypeAction("about:blank"))
sequence.append(KeyComboAction("Return"))
sequence.append(WaitForDocLoad())
# Just a little extra wait to let some events get through.
#
sequence.append(PauseAction(3000))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
h4ck3rm1k3/orca-sonar
|
test/keystrokes/firefox/dojo_bug_570566.py
|
Python
|
lgpl-2.1
| 4,031
|
[
"ORCA"
] |
93898dc8edd3848cd028655cc86c8aa0ed7437ed674e50931d548e5a032db2bf
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for compute resource tracking."""
import mock
import uuid
from oslo.config import cfg
from nova.compute import flavors
from nova.compute import resource_tracker
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova.objects import base as obj_base
from nova.objects import migration as migration_obj
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import rpc
from nova import test
from nova.tests.compute.monitors import test_monitors
from nova.tests.objects import test_migration
from nova.virt import driver
FAKE_VIRT_MEMORY_MB = 5
FAKE_VIRT_MEMORY_OVERHEAD = 1
FAKE_VIRT_MEMORY_WITH_OVERHEAD = (
FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD)
ROOT_GB = 5
EPHEMERAL_GB = 1
FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
FAKE_VIRT_VCPUS = 1
CONF = cfg.CONF
class UnsupportedVirtDriver(driver.ComputeDriver):
"""Pretend version of a lame virt driver."""
def __init__(self):
super(UnsupportedVirtDriver, self).__init__(None)
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
# no support for getting resource usage info
return {}
class FakeVirtDriver(driver.ComputeDriver):
def __init__(self, pci_support=False):
super(FakeVirtDriver, self).__init__(None)
self.memory_mb = FAKE_VIRT_MEMORY_MB
self.local_gb = FAKE_VIRT_LOCAL_GB
self.vcpus = FAKE_VIRT_VCPUS
self.memory_mb_used = 0
self.local_gb_used = 0
self.pci_support = pci_support
self.pci_devices = [{
'label': 'forza-napoli',
'dev_type': 'foo',
'compute_node_id': 1,
'address': '0000:00:00.1',
'product_id': 'p1',
'vendor_id': 'v1',
'status': 'available',
'extra_k1': 'v1'}] if self.pci_support else []
self.pci_stats = [{
'count': 1,
'vendor_id': 'v1',
'product_id': 'p1',
'extra_info': {'extra_k1': 'v1'}}] if self.pci_support else []
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
d = {
'vcpus': self.vcpus,
'memory_mb': self.memory_mb,
'local_gb': self.local_gb,
'vcpus_used': 0,
'memory_mb_used': self.memory_mb_used,
'local_gb_used': self.local_gb_used,
'hypervisor_type': 'fake',
'hypervisor_version': 0,
'hypervisor_hostname': 'fakehost',
'cpu_info': '',
}
if self.pci_support:
d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices)
return d
def estimate_instance_overhead(self, instance_info):
instance_info['memory_mb'] # make sure memory value is present
overhead = {
'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD
}
return overhead # just return a constant value for testing
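# How the fake overhead feeds the tracker tests below: a claim for an instance
# with memory_mb=M is accounted as M + FAKE_VIRT_MEMORY_OVERHEAD, e.g. claiming
# 3 MB against FAKE_VIRT_MEMORY_MB (5) leaves 5 - (3 + 1) = 1 MB free, which is
# what test_claim_and_audit asserts.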
class BaseTestCase(test.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
self.context = context.get_admin_context()
self.flags(use_local=True, group='conductor')
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
self._instances = {}
self._instance_types = {}
self.stubs.Set(self.conductor.db,
'instance_get_all_by_host_and_node',
self._fake_instance_get_all_by_host_and_node)
self.stubs.Set(self.conductor.db,
'instance_update_and_get_original',
self._fake_instance_update_and_get_original)
self.stubs.Set(self.conductor.db,
'flavor_get', self._fake_flavor_get)
self.host = 'fakehost'
def _create_compute_node(self, values=None):
compute = {
"id": 1,
"service_id": 1,
"vcpus": 1,
"memory_mb": 1,
"local_gb": 1,
"vcpus_used": 1,
"memory_mb_used": 1,
"local_gb_used": 1,
"free_ram_mb": 1,
"free_disk_gb": 1,
"current_workload": 1,
"running_vms": 0,
"cpu_info": None,
"stats": [{"key": "num_instances", "value": "1"}],
"hypervisor_hostname": "fakenode",
}
if values:
compute.update(values)
return compute
def _create_service(self, host="fakehost", compute=None):
if compute:
compute = [compute]
service = {
"id": 1,
"host": host,
"binary": "nova-compute",
"topic": "compute",
"compute_node": compute,
}
return service
def _fake_instance_system_metadata(self, instance_type, prefix=''):
sys_meta = []
for key in flavors.system_metadata_flavor_props.keys():
sys_meta.append({'key': '%sinstance_type_%s' % (prefix, key),
'value': instance_type[key]})
return sys_meta
def _fake_instance(self, stash=True, flavor=None, **kwargs):
# Default to an instance ready to resize to or from the same
# instance_type
flavor = flavor or self._fake_flavor_create()
sys_meta = self._fake_instance_system_metadata(flavor)
if stash:
# stash instance types in system metadata.
sys_meta = (sys_meta +
self._fake_instance_system_metadata(flavor, 'new_') +
self._fake_instance_system_metadata(flavor, 'old_'))
instance_uuid = str(uuid.uuid1())
instance = {
'uuid': instance_uuid,
'vm_state': vm_states.RESIZED,
'task_state': None,
'ephemeral_key_uuid': None,
'os_type': 'Linux',
'project_id': '123456',
'host': None,
'node': None,
'instance_type_id': flavor['id'],
'memory_mb': flavor['memory_mb'],
'vcpus': flavor['vcpus'],
'root_gb': flavor['root_gb'],
'ephemeral_gb': flavor['ephemeral_gb'],
'launched_on': None,
'system_metadata': sys_meta,
'availability_zone': None,
'vm_mode': None,
'reservation_id': None,
'display_name': None,
'default_swap_device': None,
'power_state': None,
'scheduled_at': None,
'access_ip_v6': None,
'access_ip_v4': None,
'key_name': None,
'updated_at': None,
'cell_name': None,
'locked': None,
'locked_by': None,
'launch_index': None,
'architecture': None,
'auto_disk_config': None,
'terminated_at': None,
'ramdisk_id': None,
'user_data': None,
'cleaned': None,
'deleted_at': None,
'id': 333,
'disable_terminate': None,
'hostname': None,
'display_description': None,
'key_data': None,
'deleted': None,
'default_ephemeral_device': None,
'progress': None,
'launched_at': None,
'config_drive': None,
'kernel_id': None,
'user_id': None,
'shutdown_terminate': None,
'created_at': None,
'image_ref': None,
'root_device_name': None,
}
instance.update(kwargs)
self._instances[instance_uuid] = instance
return instance
def _fake_flavor_create(self, **kwargs):
instance_type = {
'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'disabled': False,
'is_public': True,
'name': 'fakeitype',
'memory_mb': FAKE_VIRT_MEMORY_MB,
'vcpus': FAKE_VIRT_VCPUS,
'root_gb': ROOT_GB,
'ephemeral_gb': EPHEMERAL_GB,
'swap': 0,
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'flavorid': 'fakeflavor',
'extra_specs': {},
}
instance_type.update(**kwargs)
id_ = instance_type['id']
self._instance_types[id_] = instance_type
return instance_type
def _fake_instance_get_all_by_host_and_node(self, context, host, nodename):
return [i for i in self._instances.values() if i['host'] == host]
def _fake_flavor_get(self, ctxt, id_):
return self._instance_types[id_]
def _fake_instance_update_and_get_original(self, context, instance_uuid,
values):
instance = self._instances[instance_uuid]
instance.update(values)
# the test doesn't care what the original instance values are, it's
# only used in the subsequent notification:
return (instance, instance)
def _driver(self):
return FakeVirtDriver()
def _tracker(self, host=None):
if host is None:
host = self.host
node = "fakenode"
driver = self._driver()
tracker = resource_tracker.ResourceTracker(host, driver, node)
return tracker
class UnsupportedDriverTestCase(BaseTestCase):
"""Resource tracking should be disabled when the virt driver doesn't
support it.
"""
def setUp(self):
super(UnsupportedDriverTestCase, self).setUp()
self.tracker = self._tracker()
# seed tracker with data:
self.tracker.update_available_resource(self.context)
def _driver(self):
return UnsupportedVirtDriver()
def test_disabled(self):
# disabled = no compute node stats
self.assertTrue(self.tracker.disabled)
self.assertIsNone(self.tracker.compute_node)
def test_disabled_claim(self):
# basic claim:
instance = self._fake_instance()
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
def test_disabled_instance_claim(self):
# instance variation:
instance = self._fake_instance()
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
def test_disabled_instance_context_claim(self):
# instance context manager variation:
instance = self._fake_instance()
claim = self.tracker.instance_claim(self.context, instance)
with self.tracker.instance_claim(self.context, instance) as claim:
self.assertEqual(0, claim.memory_mb)
def test_disabled_updated_usage(self):
instance = self._fake_instance(host='fakehost', memory_mb=5,
root_gb=10)
self.tracker.update_usage(self.context, instance)
def test_disabled_resize_claim(self):
instance = self._fake_instance()
instance_type = self._fake_flavor_create()
claim = self.tracker.resize_claim(self.context, instance,
instance_type)
self.assertEqual(0, claim.memory_mb)
self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
self.assertEqual(instance_type['id'],
claim.migration['new_instance_type_id'])
def test_disabled_resize_context_claim(self):
instance = self._fake_instance()
instance_type = self._fake_flavor_create()
with self.tracker.resize_claim(self.context, instance, instance_type) \
as claim:
self.assertEqual(0, claim.memory_mb)
class MissingServiceTestCase(BaseTestCase):
def setUp(self):
super(MissingServiceTestCase, self).setUp()
self.context = context.get_admin_context()
self.tracker = self._tracker()
def test_missing_service(self):
self.tracker.update_available_resource(self.context)
self.assertTrue(self.tracker.disabled)
class MissingComputeNodeTestCase(BaseTestCase):
def setUp(self):
super(MissingComputeNodeTestCase, self).setUp()
self.tracker = self._tracker()
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_create',
self._fake_create_compute_node)
def _fake_create_compute_node(self, context, values):
self.created = True
return self._create_compute_node()
def _fake_service_get_by_compute_host(self, ctx, host):
# return a service with no joined compute
service = self._create_service()
return service
def test_create_compute_node(self):
self.tracker.update_available_resource(self.context)
self.assertTrue(self.created)
def test_enabled(self):
self.tracker.update_available_resource(self.context)
self.assertFalse(self.tracker.disabled)
class BaseTrackerTestCase(BaseTestCase):
def setUp(self):
# setup plumbing for a working resource tracker with required
# database models and a compatible compute driver:
super(BaseTrackerTestCase, self).setUp()
self.updated = False
self.deleted = False
self.tracker = self._tracker()
self._migrations = {}
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_update',
self._fake_compute_node_update)
self.stubs.Set(db, 'compute_node_delete',
self._fake_compute_node_delete)
self.stubs.Set(db, 'migration_update',
self._fake_migration_update)
self.stubs.Set(db, 'migration_get_in_progress_by_host_and_node',
self._fake_migration_get_in_progress_by_host_and_node)
self.tracker.update_available_resource(self.context)
self.limits = self._limits()
def _fake_service_get_by_compute_host(self, ctx, host):
self.compute = self._create_compute_node()
self.service = self._create_service(host, compute=self.compute)
return self.service
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.updated = True
values['stats'] = [{"key": "num_instances", "value": "1"}]
self.compute.update(values)
return self.compute
def _fake_compute_node_delete(self, ctx, compute_node_id):
self.deleted = True
self.compute.update({'deleted': 1})
return self.compute
def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host,
node):
status = ['confirmed', 'reverted', 'error']
migrations = []
for migration in self._migrations.values():
migration = obj_base.obj_to_primitive(migration)
if migration['status'] in status:
continue
uuid = migration['instance_uuid']
migration['instance'] = self._instances[uuid]
migrations.append(migration)
return migrations
def _fake_migration_update(self, ctxt, migration_id, values):
# cheat and assume there's only 1 migration present
migration = self._migrations.values()[0]
migration.update(values)
return migration
def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD,
disk_gb=FAKE_VIRT_LOCAL_GB,
vcpus=FAKE_VIRT_VCPUS):
"""Create limits dictionary used for oversubscribing resources."""
return {
'memory_mb': memory_mb,
'disk_gb': disk_gb,
'vcpu': vcpus
}
def _assert(self, value, field, tracker=None):
if tracker is None:
tracker = self.tracker
if field not in tracker.compute_node:
raise test.TestingException(
"'%(field)s' not in compute node." % {'field': field})
x = tracker.compute_node[field]
self.assertEqual(value, x)
class TrackerTestCase(BaseTrackerTestCase):
def test_free_ram_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.memory_mb - driver.memory_mb_used
self.assertEqual(mem_free, self.tracker.compute_node['free_ram_mb'])
def test_free_disk_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.local_gb - driver.local_gb_used
self.assertEqual(mem_free, self.tracker.compute_node['free_disk_gb'])
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
self.assertEqual(driver.pci_stats,
jsonutils.loads(self.tracker.compute_node['pci_stats']))
class TrackerPciStatsTestCase(BaseTrackerTestCase):
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
self.assertEqual(driver.pci_stats,
jsonutils.loads(self.tracker.compute_node['pci_stats']))
def _driver(self):
return FakeVirtDriver(pci_support=True)
class InstanceClaimTestCase(BaseTrackerTestCase):
def test_update_usage_only_for_tracked(self):
flavor = self._fake_flavor_create()
claim_mem = flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD
claim_gb = flavor['root_gb'] + flavor['ephemeral_gb']
instance = self._fake_instance(flavor=flavor, task_state=None)
self.tracker.update_usage(self.context, instance)
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'current_workload')
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertNotEqual(0, claim.memory_mb)
self._assert(claim_mem, 'memory_mb_used')
self._assert(claim_gb, 'local_gb_used')
# now update should actually take effect
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self._assert(claim_mem, 'memory_mb_used')
self._assert(claim_gb, 'local_gb_used')
self._assert(1, 'current_workload')
def test_claim_and_audit(self):
claim_mem = 3
claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
claim_disk = 2
instance = self._fake_instance(memory_mb=claim_mem, root_gb=claim_disk,
ephemeral_gb=0)
self.tracker.instance_claim(self.context, instance, self.limits)
self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["memory_mb"])
self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
self.compute["free_ram_mb"])
self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["local_gb"])
self.assertEqual(claim_disk, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
self.compute["free_disk_gb"])
# 1st pretend that the compute operation finished and claimed the
# desired resources from the virt layer
driver = self.tracker.driver
driver.memory_mb_used = claim_mem
driver.local_gb_used = claim_disk
self.tracker.update_available_resource(self.context)
# confirm tracker is adding in host_ip
self.assertIsNotNone(self.compute.get('host_ip'))
# confirm that resource usage is derived from instance usages,
# not virt layer:
self.assertEqual(claim_mem_total, self.compute['memory_mb_used'])
self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
self.compute['free_ram_mb'])
self.assertEqual(claim_disk, self.compute['local_gb_used'])
self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
self.compute['free_disk_gb'])
def test_claim_and_abort(self):
claim_mem = 3
claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
claim_disk = 2
instance = self._fake_instance(memory_mb=claim_mem,
root_gb=claim_disk, ephemeral_gb=0)
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertIsNotNone(claim)
self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
self.compute["free_ram_mb"])
self.assertEqual(claim_disk, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
self.compute["free_disk_gb"])
claim.abort()
self.assertEqual(0, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["free_ram_mb"])
self.assertEqual(0, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["free_disk_gb"])
def test_instance_claim_with_oversubscription(self):
memory_mb = FAKE_VIRT_MEMORY_MB * 2
root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB
vcpus = FAKE_VIRT_VCPUS * 2
limits = {'memory_mb': memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
'disk_gb': root_gb * 2,
'vcpu': vcpus}
instance = self._fake_instance(memory_mb=memory_mb,
root_gb=root_gb, ephemeral_gb=ephemeral_gb)
self.tracker.instance_claim(self.context, instance, limits)
self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(root_gb * 2,
self.tracker.compute_node['local_gb_used'])
def test_additive_claims(self):
self.limits['vcpu'] = 2
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=1, ephemeral_gb=0)
instance = self._fake_instance(flavor=flavor)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
instance = self._fake_instance(flavor=flavor)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
self.assertEqual(2 * (flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD),
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(2 * (flavor['root_gb'] + flavor['ephemeral_gb']),
self.tracker.compute_node['local_gb_used'])
self.assertEqual(2 * flavor['vcpus'],
self.tracker.compute_node['vcpus_used'])
def test_context_claim_with_exception(self):
instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=1)
try:
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
raise test.TestingException()
except test.TestingException:
pass
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
self.assertEqual(0, self.compute['memory_mb_used'])
self.assertEqual(0, self.compute['local_gb_used'])
def test_instance_context_claim(self):
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=2, ephemeral_gb=3)
instance = self._fake_instance(flavor=flavor)
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node['local_gb_used'])
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
# after exiting the claim context, the build is marked as finished. usage
# totals should remain the same:
self.tracker.update_available_resource(self.context)
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node['local_gb_used'])
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
def test_update_load_stats_for_instance(self):
instance = self._fake_instance(task_state=task_states.SCHEDULING)
with self.tracker.instance_claim(self.context, instance):
pass
self.assertEqual(1, self.tracker.compute_node['current_workload'])
instance['vm_state'] = vm_states.ACTIVE
instance['task_state'] = None
instance['host'] = 'fakehost'
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
def test_cpu_stats(self):
limits = {'disk_gb': 100, 'memory_mb': 100}
self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
vcpus = 1
instance = self._fake_instance(vcpus=vcpus)
# should not do anything until a claim is made:
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
# instance state can change without modifying vcpus in use:
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
add_vcpus = 10
vcpus += add_vcpus
instance = self._fake_instance(vcpus=add_vcpus)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
instance['vm_state'] = vm_states.DELETED
self.tracker.update_usage(self.context, instance)
vcpus -= add_vcpus
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
def test_skip_deleted_instances(self):
# ensure that the audit process skips instances that have vm_state
# DELETED but whose DB records are not yet deleted.
self._fake_instance(vm_state=vm_states.DELETED, host=self.host)
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
class ResizeClaimTestCase(BaseTrackerTestCase):
def setUp(self):
super(ResizeClaimTestCase, self).setUp()
def _fake_migration_create(mig_self, ctxt):
self._migrations[mig_self.instance_uuid] = mig_self
mig_self.obj_reset_changes()
self.stubs.Set(migration_obj.Migration, 'create',
_fake_migration_create)
self.instance = self._fake_instance()
self.instance_type = self._fake_flavor_create()
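# helper that builds a Migration object; Migration.create() itself is
# stubbed out in setUp() above, so nothing touches a real database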
def _fake_migration_create(self, context, values=None):
instance_uuid = str(uuid.uuid1())
mig_dict = test_migration.fake_db_migration()
mig_dict.update({
'id': 1,
'source_compute': 'host1',
'source_node': 'fakenode',
'dest_compute': 'host2',
'dest_node': 'fakenode',
'dest_host': '127.0.0.1',
'old_instance_type_id': 1,
'new_instance_type_id': 2,
'instance_uuid': instance_uuid,
'status': 'pre-migrating',
'updated_at': timeutils.utcnow()
})
if values:
mig_dict.update(values)
migration = migration_obj.Migration()
migration.update(mig_dict)
# This hits the stub in setUp()
migration.create('fake')
def test_claim(self):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
self.assertEqual(1, len(self.tracker.tracked_migrations))
def test_abort(self):
try:
with self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits):
raise test.TestingException("abort")
except test.TestingException:
pass
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self.assertEqual(0, len(self.tracker.tracked_migrations))
def test_additive_claims(self):
limits = self._limits(
2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
2 * FAKE_VIRT_LOCAL_GB,
2 * FAKE_VIRT_VCPUS)
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, limits)
instance2 = self._fake_instance()
self.tracker.resize_claim(self.context, instance2, self.instance_type,
limits)
self._assert(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')
def test_claim_and_audit(self):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
self.tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
def test_same_host(self):
self.limits['vcpu'] = 3
src_dict = {
'memory_mb': 1, 'root_gb': 1, 'ephemeral_gb': 0, 'vcpus': 1}
dest_dict = dict((k, v + 1) for (k, v) in src_dict.iteritems())
src_type = self._fake_flavor_create(
id=10, name="srcflavor", **src_dict)
dest_type = self._fake_flavor_create(
id=11, name="destflavor", **dest_dict)
# make an instance of src_type:
instance = self._fake_instance(flavor=src_type)
instance['system_metadata'] = self._fake_instance_system_metadata(
dest_type)
self.tracker.instance_claim(self.context, instance, self.limits)
# resize to dest_type:
claim = self.tracker.resize_claim(self.context, instance,
dest_type, self.limits)
self._assert(src_dict['memory_mb'] + dest_dict['memory_mb']
+ 2 * FAKE_VIRT_MEMORY_OVERHEAD, 'memory_mb_used')
self._assert(src_dict['root_gb'] + src_dict['ephemeral_gb']
+ dest_dict['root_gb'] + dest_dict['ephemeral_gb'],
'local_gb_used')
self._assert(src_dict['vcpus'] + dest_dict['vcpus'], 'vcpus_used')
self.tracker.update_available_resource(self.context)
claim.abort()
# only the original instance should remain, not the migration:
self._assert(src_dict['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
'memory_mb_used')
self._assert(src_dict['root_gb'] + src_dict['ephemeral_gb'],
'local_gb_used')
self._assert(src_dict['vcpus'], 'vcpus_used')
self.assertEqual(1, len(self.tracker.tracked_instances))
self.assertEqual(0, len(self.tracker.tracked_migrations))
def test_revert(self):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
self.tracker.drop_resize_claim(self.instance)
self.assertEqual(0, len(self.tracker.tracked_instances))
self.assertEqual(0, len(self.tracker.tracked_migrations))
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
def test_revert_reserve_source(self):
# if a revert has started at the API and audit runs on
# the source compute before the instance flips back to source,
# resources should still be held at the source based on the
# migration:
dest = "desthost"
dest_tracker = self._tracker(host=dest)
dest_tracker.update_available_resource(self.context)
self.instance = self._fake_instance(memory_mb=FAKE_VIRT_MEMORY_MB,
root_gb=FAKE_VIRT_LOCAL_GB, ephemeral_gb=0,
vcpus=FAKE_VIRT_VCPUS, instance_type_id=1)
values = {'source_compute': self.host, 'dest_compute': dest,
'old_instance_type_id': 1, 'new_instance_type_id': 1,
'status': 'post-migrating',
'instance_uuid': self.instance['uuid']}
self._fake_migration_create(self.context, values)
# attach an instance to the destination host tracker:
dest_tracker.instance_claim(self.context, self.instance)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD,
'memory_mb_used', tracker=dest_tracker)
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used',
tracker=dest_tracker)
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used',
tracker=dest_tracker)
# audit and recheck to confirm migration doesn't get double counted
# on dest:
dest_tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD,
'memory_mb_used', tracker=dest_tracker)
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used',
tracker=dest_tracker)
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used',
tracker=dest_tracker)
# apply the migration to the source host tracker:
self.tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
# flag the instance and migration as reverting and re-audit:
self.instance['vm_state'] = vm_states.RESIZED
self.instance['task_state'] = task_states.RESIZE_REVERTING
self.tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_MB + 1, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
def test_resize_filter(self):
instance = self._fake_instance(vm_state=vm_states.ACTIVE,
task_state=task_states.SUSPENDING)
self.assertFalse(self.tracker._instance_in_resize_state(instance))
instance = self._fake_instance(vm_state=vm_states.RESIZED,
task_state=task_states.SUSPENDING)
self.assertTrue(self.tracker._instance_in_resize_state(instance))
states = [task_states.RESIZE_PREP, task_states.RESIZE_MIGRATING,
task_states.RESIZE_MIGRATED, task_states.RESIZE_FINISH]
for vm_state in [vm_states.ACTIVE, vm_states.STOPPED]:
for task_state in states:
instance = self._fake_instance(vm_state=vm_state,
task_state=task_state)
result = self.tracker._instance_in_resize_state(instance)
self.assertTrue(result)
def test_dupe_filter(self):
instance = self._fake_instance(host=self.host)
values = {'source_compute': self.host, 'dest_compute': self.host,
'instance_uuid': instance['uuid'], 'new_instance_type_id': 2}
self._fake_migration_create(self.context, values)
self._fake_migration_create(self.context, values)
self.tracker.update_available_resource(self.context)
self.assertEqual(1, len(self.tracker.tracked_migrations))
def test_set_instance_host_and_node(self):
instance = self._fake_instance()
self.assertIsNone(instance['host'])
self.assertIsNone(instance['launched_on'])
self.assertIsNone(instance['node'])
claim = self.tracker.instance_claim(self.context, instance)
self.assertNotEqual(0, claim.memory_mb)
self.assertEqual('fakehost', instance['host'])
self.assertEqual('fakehost', instance['launched_on'])
self.assertEqual('fakenode', instance['node'])
class NoInstanceTypesInSysMetadata(ResizeClaimTestCase):
"""Make sure we handle the case where the following are true:
1) Compute node C gets upgraded to code that looks for instance types in
system metadata. AND
2) C already has instances in the process of migrating that do not have
stashed instance types.
bug 1164110
"""
def setUp(self):
super(NoInstanceTypesInSysMetadata, self).setUp()
self.instance = self._fake_instance(stash=False)
class OrphanTestCase(BaseTrackerTestCase):
def _driver(self):
class OrphanVirtDriver(FakeVirtDriver):
def get_per_instance_usage(self):
return {
'1-2-3-4-5': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '1-2-3-4-5'},
'2-3-4-5-6': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '2-3-4-5-6'},
}
return OrphanVirtDriver()
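# the two usages reported above have no matching instances, so the
# tracker should account for them as orphans (see test_find below)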
def test_usage(self):
self.assertEqual(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
def test_find(self):
# create one legit instance and verify the 2 orphans remain
self._fake_instance()
orphans = self.tracker._find_orphaned_instances()
self.assertEqual(2, len(orphans))
class ComputeMonitorTestCase(BaseTestCase):
def setUp(self):
super(ComputeMonitorTestCase, self).setUp()
fake_monitors = [
'nova.tests.compute.monitors.test_monitors.FakeMonitorClass1',
'nova.tests.compute.monitors.test_monitors.FakeMonitorClass2']
self.flags(compute_available_monitors=fake_monitors)
self.tracker = self._tracker()
self.node_name = 'nodename'
self.user_id = 'fake'
self.project_id = 'fake'
self.info = {}
self.context = context.RequestContext(self.user_id,
self.project_id)
def test_get_host_metrics_none(self):
self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass4'])
self.tracker.monitors = []
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
self.assertEqual(len(metrics), 0)
def test_get_host_metrics_one_failed(self):
self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass4'])
class1 = test_monitors.FakeMonitorClass1(self.tracker)
class4 = test_monitors.FakeMonitorClass4(self.tracker)
self.tracker.monitors = [class1, class4]
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
self.assertTrue(len(metrics) > 0)
def test_get_host_metrics(self):
self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass2'])
class1 = test_monitors.FakeMonitorClass1(self.tracker)
class2 = test_monitors.FakeMonitorClass2(self.tracker)
self.tracker.monitors = [class1, class2]
mock_notifier = mock.Mock()
with mock.patch.object(rpc, 'get_notifier',
return_value=mock_notifier) as mock_get:
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_get.assert_called_once_with(service='compute',
host=self.node_name)
expected_metrics = [{
'timestamp': 1232,
'name': 'key1',
'value': 2600,
'source': 'libvirt'
}, {
'name': 'key2',
'source': 'libvirt',
'timestamp': 123,
'value': 1600
}]
payload = {
'metrics': expected_metrics,
'host': self.tracker.host,
'host_ip': CONF.my_ip,
'nodename': self.node_name
}
mock_notifier.info.assert_called_once_with(
self.context, 'compute.metrics.update', payload)
self.assertEqual(metrics, expected_metrics)
|
afrolov1/nova
|
nova/tests/compute/test_resource_tracker.py
|
Python
|
apache-2.0
| 43,467
|
[
"exciting"
] |
6b61a8399ad777ba2097d41f2932e5923ba4a0997d003f8bbc06e144b3a1d610
|
#!/usr/bin/env python
'''unit testing code for pysam.
Execute in the :file:`tests` directory as it requires the Makefile
and data files located there.
'''
import pysam
import pysam.samtools
import pysam.bcftools
import unittest
import os
import re
import glob
import sys
import subprocess
import shutil
from TestUtils import checkBinaryEqual, check_lines_equal, \
check_samtools_view_equal, get_temp_filename, force_bytes
IS_PYTHON3 = sys.version_info[0] >= 3
WORKDIR = "pysam_test_work"
DATADIR = "pysam_data"
def run_command(cmd):
'''run a samtools command'''
try:
retcode = subprocess.call(cmd, shell=True,
stderr=subprocess.PIPE)
if retcode < 0:
print("Child was terminated by signal", -retcode)
except OSError as e:
print("Execution failed:", e)
def get_version(executable):
'''return samtools/bcftools version'''
with subprocess.Popen(executable, shell=True,
stderr=subprocess.PIPE).stderr as pipe:
lines = b"".join(pipe.readlines())
if IS_PYTHON3:
lines = lines.decode('ascii')
try:
x = re.search(r"Version:\s+(\S+)", lines).groups()[0]
except AttributeError:
raise ValueError("could not get version from %s" % lines)
return x
class SamtoolsTest(unittest.TestCase):
'''Test samtools command line commands and compare them
against the corresponding pysam commands.
Tests fail if the output is not binary identical.
'''
requisites = [
"ex1.fa", "ex1.fa.fai",
"ex1.sam.gz",
"ex1.bam", "ex1.bam.bai",
"ex1.sam", "ex2.bam",
"ex1.bed"]
# a list of statements to test. Each statement should contain at
# least one %(out)s component indicating an output file.
statements = [
"view ex1.bam > %(out)s_ex1.view",
# ("view -bT ex1.fa -o %(out)s_ex1.view2 ex1.sam",
"sort ex1.bam -o %(out)s_ex1.sort.bam",
"mpileup ex1.bam > %(out)s_ex1.pileup",
"depth ex1.bam > %(out)s_ex1.depth",
# TODO: issues with file naming
# "faidx ex1.fa; %(out)s_ex1.fa.fai",
"index ex1.bam %(out)s_ex1.bam.fai",
"idxstats ex1.bam > %(out)s_ex1.idxstats",
"fixmate ex1.bam %(out)s_ex1.fixmate.bam",
"flagstat ex1.bam > %(out)s_ex1.flagstat",
# Fails on python 3.3 on linux; passes on OS X and when
# run locally
"calmd ex1.bam ex1.fa > %(out)s_ex1.calmd.bam",
# use the -s option; otherwise samtools 1.2 emits the error
# "Samtools-htslib-API: bam_get_library() not yet implemented",
# which causes downstream problems
# TODO: the following cause subsequent commands to fail with
# "unknown option"
# "rmdup -s ex1.bam %(out)s_ex1.rmdup.bam",
# "merge -f %(out)s_ex1.merge.bam ex1.bam ex1.bam",
"reheader ex1.sam ex1.bam > %(out)s_ex1.reheader",
"cat -o %(out)s_ex1.cat.bam ex1.bam ex1.bam",
"targetcut ex1.bam > %(out)s_ex1.targetcut",
"phase ex1.bam > %(out)s_ex1.phase",
"import ex1.fa.fai ex1.sam.gz %(out)s_ex1.bam",
"bam2fq ex1.bam > %(out)s_ex1.bam2fq",
# TODO: not the same
# "pad2unpad -T ex1.fa ex2.bam > %(out)s_ex2.unpad",
# TODO: command line option problem
# "bamshuf ex1.bam -O --output-fmt SAM > %(out)s_ex1.bamshuf.sam",
# "collate ex1.bam %(out)s_ex1.collate",
"bedcov ex1.bed ex1.bam > %(out)s_ex1.bedcov",
"stats ex1.bam > %(out)s_ex1.stats",
"dict ex1.bam > %(out)s_ex1.dict",
# TODO: not the same
# ("addreplacerg -r 'RG\tID:ga\tSM:hs' ex1.bam > %(out)s_ex1.addreplacerg",
]
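# For example, the first statement runs on the command line as
#   "samtools view ex1.bam > samtools_ex1.view"
# and through pysam as pysam.samtools.view("ex1.bam"); the captured
# output is written to pysam_ex1.view before the two files are compared.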
map_command = {
"import": "samimport"}
executable = "samtools"
module = pysam.samtools
def check_version(self):
samtools_version = get_version(self.executable)
def _r(s):
# patch - remove any of the alpha/beta suffixes, i.e., 0.1.12a ->
# 0.1.12
if s.count('-') > 0:
s = s[0:s.find('-')]
return re.sub("[^0-9.]", "", s)
if _r(samtools_version) != _r(pysam.__samtools_version__):
raise ValueError(
"versions of pysam.%s and %s differ: %s != %s" %
(self.executable,
self.executable,
pysam.__samtools_version__,
samtools_version))
def setUp(self):
'''setup tests.
For setup, all commands will be run before the first test is
executed. Individual tests will then just compare the output
files.
'''
self.check_version()
if not os.path.exists(WORKDIR):
os.makedirs(WORKDIR)
for f in self.requisites:
shutil.copy(os.path.join(DATADIR, f),
os.path.join(WORKDIR, f))
self.savedir = os.getcwd()
os.chdir(WORKDIR)
return
def check_statement(self, statement):
parts = statement.split(" ")
r_samtools = {"out": self.executable}
r_pysam = {"out": "pysam"}
command = parts[0]
command = self.map_command.get(command, command)
# self.assertTrue(command in pysam.SAMTOOLS_DISPATCH)
targets = [x for x in parts if "%(out)s" in x]
samtools_targets = [x % r_samtools for x in targets]
pysam_targets = [x % r_pysam for x in targets]
pysam_method = getattr(self.module, command)
# run samtools
full_statement = re.sub(r"%\(out\)s", self.executable, statement)
run_command(" ".join((self.executable, full_statement)))
# sys.stdout.write("%s %s ok" % (command, self.executable))
# run pysam
if ">" in statement:
assert parts[-2] == ">"
parts = parts[:-2]
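# drop the trailing "> target" redirection; pysam captures stdout
# itself and the result is written to the target file below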
# avoid interpolation to preserve string quoting, tab chars, etc.
pysam_parts = [re.sub(r"%\(out\)s", "pysam", x) for x in parts[1:]]
output = pysam_method(*pysam_parts,
raw=True,
catch_stdout=True)
# sys.stdout.write(" pysam ok\n")
if ">" in statement:
with open(pysam_targets[-1], "wb") as outfile:
if output is not None:
outfile.write(force_bytes(output))
for samtools_target, pysam_target in zip(samtools_targets,
pysam_targets):
if os.path.isdir(samtools_target):
samtools_files = glob.glob(os.path.join(
samtools_target, "*"))
pysam_files = glob.glob(os.path.join(pysam_target, "*"))
self.assertEqual(len(samtools_files), len(pysam_files))
# need to be able to exclude files like README, etc.
continue
else:
samtools_files = [samtools_target]
pysam_files = [pysam_target]
for s, p in zip(samtools_files, pysam_files):
binary_equal = checkBinaryEqual(s, p)
error_msg = "%s failed: files %s and %s are not the same" % (command, s, p)
if binary_equal:
continue
if s.endswith(".bam"):
self.assertTrue(
check_samtools_view_equal(
s, p, without_header=True),
error_msg)
check_lines_equal(
self, s, p,
filter_f=lambda x: x.startswith("#"),
msg=error_msg)
def testStatements(self):
for statement in self.statements:
if (statement.startswith("calmd") and
list(sys.version_info[:2]) == [3, 3]):
# skip the calmd test; it fails only on python 3.3.5
# on linux (empty output). It works on OS X and passes
# on 3.4 and 3.5; see issue #293
continue
self.check_statement(statement)
def tearDown(self):
# return to the original directory before removing WORKDIR, since
# WORKDIR is a relative path and setUp() chdirs into it
os.chdir(self.savedir)
if os.path.exists(WORKDIR):
shutil.rmtree(WORKDIR)
class EmptyIndexTest(unittest.TestCase):
def testEmptyIndex(self):
self.assertRaises(IOError, pysam.samtools.index,
"exdoesntexist.bam")
class TestReturnType(unittest.TestCase):
def testReturnValueString(self):
retval = pysam.idxstats(os.path.join(DATADIR, "ex1.bam"))
if IS_PYTHON3:
self.assertFalse(isinstance(retval, bytes))
self.assertTrue(isinstance(retval, str))
else:
self.assertTrue(isinstance(retval, bytes))
self.assertTrue(isinstance(retval, basestring))
def testReturnValueData(self):
args = "-O BAM {}".format(os.path.join(DATADIR, "ex1.bam")).split(" ")
retval = pysam.view(*args)
if IS_PYTHON3:
self.assertTrue(isinstance(retval, bytes))
self.assertFalse(isinstance(retval, str))
else:
self.assertTrue(isinstance(retval, bytes))
self.assertTrue(isinstance(retval, basestring))
class StdoutTest(unittest.TestCase):
'''test if stdout can be redirected.'''
def testWithRedirectedStdout(self):
r = pysam.samtools.flagstat(
os.path.join(DATADIR, "ex1.bam"))
self.assertTrue(len(r) > 0)
def testWithoutRedirectedStdout(self):
r = pysam.samtools.flagstat(
os.path.join(DATADIR, "ex1.bam"),
catch_stdout=False)
self.assertEqual(r, None)
def testDoubleCalling(self):
# The following would fail if there is an
# issue with stdout being improperly caught.
retvals = pysam.idxstats(
os.path.join(DATADIR, "ex1.bam"))
retvals = pysam.idxstats(
os.path.join(DATADIR, "ex1.bam"))
def testSaveStdout(self):
outfile = get_temp_filename(suffix=".tsv")
r = pysam.samtools.flagstat(
os.path.join(DATADIR, "ex1.bam"),
save_stdout=outfile)
self.assertEqual(r, None)
with open(outfile) as inf:
r = inf.read()
self.assertTrue(len(r) > 0)
class PysamTest(SamtoolsTest):
"""check access to samtools command in the pysam
main package.
This is for backwards capability.
"""
module = pysam
class BcftoolsTest(SamtoolsTest):
requisites = [
"ex1.fa",
"ex1.vcf.gz",
"ex1.vcf.gz.tbi",
]
# a list of statements to test. Each statement should contain at
# least one %(out)s component indicating an output file.
statements = [
# "index -n ex1.vcf.gz > %(out)s_ex1.index",
"annotate -x ID ex1.vcf.gz > %(out)s_ex1.annotate",
"concat -a ex1.vcf.gz ex1.vcf.gz > %(out)s_ex1.concat",
"isec -p %(out)s_ex1.isec ex1.vcf.gz ex1.vcf.gz",
"merge --force-samples ex1.vcf.gz ex1.vcf.gz > %(out)s_ex1.norm",
"norm -m +both ex1.vcf.gz > %(out)s_ex1.norm",
# "plugin",
# "query -f '%CHROM\n' ex1.vcf.gz > %(out)s_ex1.query",
# "reheader -s A > %(out)s_ex1.reheader",
# "view ex1.vcf.gz > %(out)s_ex1.view",
# "call -m ex1.vcf.gz > %(out)s_ex1.call",
# bad file descriptor
# "consensus -f ex1.fa ex1.vcf.gz > %(out)s_ex1.consensus"
# need appropriate VCF file
# "cnv",
# segfault
# "filter -s A ex1.vcf.gz > %(out)s_ex1.filter",
# exit
# "gtcheck -s A ex1.vcf.gz > %(out)s_ex1.gtcheck",
"roh -s A ex1.vcf.gz > %(out)s_ex1.roh",
"stats ex1.vcf.gz > %(out)s_ex1.stats",
]
map_command = {
"import": "samimport"}
executable = "bcftools"
module = pysam.bcftools
if __name__ == "__main__":
# build data files
print ("building data files")
subprocess.call("make -C %s" % DATADIR, shell=True)
print ("starting tests")
unittest.main()
print ("completed tests")
|
bioinformed/pysam
|
tests/samtools_test.py
|
Python
|
mit
| 12,004
|
[
"pysam"
] |
5744bbbbed0fb6472124354e226e95caa87c9c3c049028e27fd4fd9c171d8e03
|
from dolfin import *
from fenics import *
from ply2fn import *
from code import *
from iterator import *
from normal_deriv import *
import numpy as np  # needed for np.sqrt below (assumption: not re-exported by the star imports)
#comm=dolfin.mpi_comm_self()
#comm.MPI_Init_thread()
#filename="../neuron_mesh_data/neuron"
#ibdry=9
#obdry=10
filename="../head_mesh_data/head"
ibdry=10
obdry=9
# parse the ply file into a vertex mesh and write it out as a dolfin xml file
#comm=dolfin.mpi_comm_self()
#if MPI.rank(comm) == 0:
# print(str(MPI.rank(comm))+'\n')
# get the boundary mesh and the boundary values defined on it
infile = open(filename + "_bvals.ply", "r")
[mesh,Q,vals]=ply2bvals(infile)
infile.close()
mesh2 = Mesh(filename+".xml")
boundaries = MeshFunction('size_t',mesh2,filename+"_facet_region.xml")
V = FunctionSpace(mesh2, "Lagrange",1)
bvals=extend_boundary_fun(V,Q,vals)
#Compute external measurements
w=inorout(V,boundaries,bvals,ibdry,obdry,"insideout")
w.vector()[:]/=255.
nvs=get_facet_normal(V,boundaries,ibdry)
e0=Constant((1.,0.,0.))
e1=Constant((0.,1.,0.))
e2=Constant((0.,0.,1.))
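# assemble the facet normal as a vector field from its scalar components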
nvecs=e0*nvs[0]+e1*nvs[1]+e2*nvs[2]
[delta,sol]=bval_problem_iterator(V,boundaries,w,ibdry,obdry,nvecs,65)
v=Function(V)
v.vector()[:]=w.vector()[:]-sol.vector()[:]
vsq=Function(V)
vsq.vector()[:]=v.vector()[:]*v.vector()[:]
wsq=Function(V)
wsq.vector()[:]=w.vector()[:]*w.vector()[:]
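# relative L2 error of the iterated solution against the boundary
# data: error = ||w - sol|| / ||w||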
epsilon=np.sqrt(assemble(vsq*dx))
gamma=np.sqrt(assemble(wsq*dx))
error=epsilon/gamma
#File("../output/sol.pvd") << sol
File("../output/bvals.pvd") << w
#File("../output/delta.pvd") << delta
|
jcrist1/iterated_bdry_solver
|
src/main.py
|
Python
|
mit
| 1,506
|
[
"NEURON"
] |
7275c33a55b1857e8bd6708414c5ec7a26dd2ba6ff3be21d39bb4d5f625a1e88
|
import tests.base
from netcdf import netcdf as nc
import os
import stat
import numpy as np
class TestNetcdf(tests.base.TestCase):
def test_open_unexistent_file(self):
with self.assertRaisesRegexp(Exception, u'There is not file list or '
'pattern to open.'):
nc.open([])
def test_open_unexistent_pattern(self):
with self.assertRaisesRegexp(Exception, u'There is not file list or '
'pattern to open.'):
nc.open('')
def test_open_close_existent_file(self):
# check opening an existing file.
root, is_new = nc.open('unittest00.nc')
self.assertEquals(root.files, ['unittest00.nc'])
self.assertEquals(root.pattern, 'unittest00.nc')
self.assertEquals(len(root.roots), 1)
self.assertFalse(is_new)
self.assertFalse(root.read_only)
# check closing an existing file.
nc.close(root)
with self.assertRaisesRegexp(RuntimeError, u'NetCDF: Not a valid ID'):
nc.close(root)
def test_open_close_new_file(self):
# delete the filename from the system.
filename = 'unittest-1.nc'
if os.path.isfile(filename):
os.remove(filename)
# check creating and opening a new file.
root, is_new = nc.open(filename)
self.assertEquals(root.files, ['unittest-1.nc'])
self.assertEquals(root.pattern, 'unittest-1.nc')
self.assertEquals(len(root.roots), 1)
self.assertTrue(is_new)
self.assertFalse(root.read_only)
# check closing the created file.
nc.close(root)
with self.assertRaisesRegexp(RuntimeError, u'NetCDF: Not a valid ID'):
nc.close(root)
def test_open_close_readonly_file(self):
# set the file to be readonly.
filename = 'ro_unittest.nc'
if os.path.isfile(filename):
os.chmod(filename, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
# check opening the readonly file.
root, is_new = nc.open(filename)
self.assertEquals(root.files, [filename])
self.assertEquals(root.pattern, filename)
self.assertEquals(len(root.roots), 1)
self.assertFalse(is_new)
self.assertTrue(root.read_only)
# check closing the readonly file.
nc.close(root)
with self.assertRaisesRegexp(RuntimeError, u'NetCDF: Not a valid ID'):
nc.close(root)
def test_open_close_file_with_readonly_restriction(self):
# check the file is NOT read only.
filename = 'unittest00.nc'
can_write = os.access(filename, os.W_OK)
self.assertTrue(can_write)
# check opening an existing file.
root, is_new = nc.open('unittest00.nc', read_only=True)
self.assertEquals(root.files, ['unittest00.nc'])
self.assertEquals(root.pattern, 'unittest00.nc')
self.assertEquals(len(root.roots), 1)
self.assertFalse(is_new)
self.assertTrue(root.read_only)
with self.assertRaisesRegexp(Exception, u'NetCDF: Write to read only'):
var = nc.getvar(root, 'data')
var[:] = 0
# check closing an existing file.
nc.close(root)
with self.assertRaisesRegexp(RuntimeError, u'NetCDF: Not a valid ID'):
nc.close(root)
def test_open_close_multiple_files_with_readonly_restriction(self):
# check the files are NOT read only.
filenames = map(lambda i: 'unittest0%i.nc' % i, range(5))
can_write = map(lambda f: os.access(f, os.W_OK), filenames)
self.assertTrue(all(can_write))
# check opening the pattern selection through a package instance.
root, is_new = nc.open('unittest0*.nc', read_only=True)
self.assertEquals(root.files, ['unittest0%i.nc' % i for i in range(5)])
self.assertEquals(root.pattern, 'unittest0*.nc')
self.assertEquals(len(root.roots), 5)
self.assertFalse(is_new)
self.assertTrue(root.read_only)
with self.assertRaisesRegexp(Exception, u'NetCDF: Write to read only'):
var = nc.getvar(root, 'data')
var[:] = 0
# check closing the package with all the files.
nc.close(root)
with self.assertRaisesRegexp(RuntimeError, u'NetCDF: Not a valid ID'):
nc.close(root)
def test_open_close_multiple_files(self):
# check opening the pattern selection through a package instance.
root, is_new = nc.open('unittest0*.nc')
self.assertEquals(root.files, ['unittest0%i.nc' % i for i in range(5)])
self.assertEquals(root.pattern, 'unittest0*.nc')
self.assertEquals(len(root.roots), 5)
self.assertFalse(is_new)
self.assertFalse(root.read_only)
# check closing the package with all the files.
nc.close(root)
with self.assertRaisesRegexp(RuntimeError, u'NetCDF: Not a valid ID'):
nc.close(root)
def test_open_close_using_with(self):
# check opening the pattern selection through a package instance.
with nc.loader('unittest0*.nc') as root:
self.assertEquals(root.files,
['unittest0%i.nc' % i for i in range(5)])
self.assertEquals(root.pattern, 'unittest0*.nc')
self.assertEquals(len(root.roots), 5)
self.assertFalse(root.is_new)
self.assertFalse(root.read_only)
# check closing the package with all the files.
with self.assertRaisesRegexp(RuntimeError, u'NetCDF: Not a valid ID'):
nc.close(root)
def test_get_existing_dim_single_file(self):
# check getting the dimension in a single file.
root = nc.open('unittest00.nc')[0]
self.assertEquals(len(nc.getdim(root, 'time')), 1)
nc.close(root)
def test_get_not_existing_dim_single_file(self):
# check creating a missing dimension in a single file.
root = nc.open('unittest00.nc')[0]
self.assertFalse(root.has_dimension('the_12th_dimension'))
self.assertEquals(len(nc.getdim(root, 'the_12th_dimension', 123)), 1)
self.assertTrue(root.has_dimension('the_12th_dimension'))
nc.close(root)
def test_get_existing_dim_multiple_file(self):
# check getting the dimension across multiple files.
root = nc.open('unittest0*.nc')[0]
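# five files match the pattern, so the distributed dimension has one
# entry per file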
self.assertEquals(len(nc.getdim(root, 'time')), 5)
nc.close(root)
def test_get_not_existing_dim_multiple_file(self):
# check creating a missing dimension across multiple files.
root = nc.open('unittest0*.nc')[0]
self.assertFalse(root.has_dimension('the_12th_dimension'))
self.assertEquals(len(nc.getdim(root, 'the_12th_dimension', 123)), 5)
self.assertTrue(root.has_dimension('the_12th_dimension'))
nc.close(root)
def test_get_existing_var_single_file(self):
# check getting the variable in a single file.
root = nc.open('unittest00.nc')[0]
self.assertNotIn('data', root.variables)
var = nc.getvar(root, 'data')
self.assertEquals(var.shape, (1, 100, 200))
self.assertIn('data', root.variables)
are_equals = (var[:] == self.data)
self.assertTrue(are_equals.all())
nc.close(root)
def test_get_non_existing_var_single_file(self):
# check creating a missing variable in a single file.
root = nc.open('unittest00.nc')[0]
self.assertNotIn('new_variable', root.variables)
var = nc.getvar(root, 'new_variable',
'f4', ('time', 'yc', 'xc'),
digits=3, fill_value=1.2)
self.assertEquals(var.shape, (1, 100, 200))
self.assertIn('new_variable', root.variables)
ref = np.zeros(var.shape) + 1.2
# the comparison is true if the error is less than 0.002
are_equals = (var[:] - ref) < 0.002
self.assertTrue(are_equals.all())
nc.close(root)
def test_get_existing_var_multiple_file(self):
# check getting the variable across multiple files.
root = nc.open('unittest0*.nc')[0]
self.assertNotIn('data', root.variables)
var = nc.getvar(root, 'data')
self.assertEquals(var.shape, (5, 100, 200))
self.assertIn('data', root.variables)
are_equals = (var[:] == self.data)
self.assertTrue(are_equals.all())
nc.close(root)
def test_get_non_existing_var_multiple_file(self):
# check creating a missing variable across multiple files.
root = nc.open('unittest0*.nc')[0]
self.assertNotIn('new_variable', root.variables)
var = nc.getvar(root, 'new_variable',
'f4', ('time', 'yc', 'xc'),
digits=3, fill_value=1.2)
self.assertEquals(var.shape, (5, 100, 200))
self.assertIn('new_variable', root.variables)
ref = np.zeros(var.shape) + 1.2
# the comparison is true if the error is less than 0.002
are_equals = (var[:] - ref) < 0.002
self.assertTrue(are_equals.all())
nc.close(root)
def test_single_file_var_operations(self):
# check getting and setting the numpy matrix.
root = nc.open('unittest00.nc')[0]
var = nc.getvar(root, 'data')
self.assertEquals(var.__class__, nc.SingleNCVariable)
self.assertEquals(var[:].__class__, np.ndarray)
tmp = var[:]
var[:] = var[:] + 1
nc.close(root)
# check if value was saved into the file.
root = nc.open('unittest00.nc')[0]
var = nc.getvar(root, 'data')
self.assertEquals(var, tmp + 1)
nc.close(root)
def test_multiple_file_var_operations(self):
# check getting and setting the numpy matrix.
root = nc.open('unittest0*.nc')[0]
var = nc.getvar(root, 'data')
self.assertEquals(var.__class__, nc.DistributedNCVariable)
self.assertEquals(var[:].__class__, np.ndarray)
tmp = var[:]
var[:] = var[:] + 1
nc.close(root)
# check if value was saved into the file.
root = nc.open('unittest0*.nc')[0]
var = nc.getvar(root, 'data')
self.assertEquals(var, tmp + 1)
nc.close(root)
def test_single_file_new_var_operations(self):
# check creating a new var.
root = nc.open('unittest00.nc')[0]
var = nc.getvar(root, 'new_variable',
'f4', ('time', 'yc', 'xc'),
digits=3, fill_value=1.0)
self.assertTrue((var[:] == 1.0).all())
self.assertEquals(var.__class__, nc.SingleNCVariable)
self.assertEquals(var[:].__class__, np.ndarray)
tmp = var[:]
var[:] = var[:] + 1
nc.close(root)
# check if value was saved into the file.
root = nc.open('unittest00.nc')[0]
var = nc.getvar(root, 'new_variable')
self.assertEquals(var, tmp + 1)
nc.close(root)
def test_multiple_file_new_var_operations(self):
# check creating a new var.
root = nc.open('unittest0*.nc')[0]
var = nc.getvar(root, 'new_variable',
'f4', ('time', 'yc', 'xc'),
digits=3, fill_value=1.0)
self.assertTrue((var[:] == 1.0).all())
self.assertEquals(var.__class__, nc.DistributedNCVariable)
self.assertEquals(var[:].__class__, np.ndarray)
tmp = var[:]
var[:] = var[:] + 1
nc.close(root)
# check if value was saved into the files.
root = nc.open('unittest00.nc')[0]
var = nc.getvar(root, 'new_variable')
self.assertEquals(var, tmp + 1)
nc.close(root)
def test_character_variables_in_single_file(self):
# check getting and setting the numpy string matrix in single files.
root = nc.open('unittest00.nc')[0]
var = nc.getvar(root, 'auditTrail')
self.assertEquals(var.shape, (1, 2, 80))
self.assertEquals(var, self.auditTrail)
self.auditTrail[:].data[0:6] = 'CHANGE'
var[0, 0:6] = np.array(list('CHANGE'))
self.assertEquals(var, self.auditTrail)
nc.close(root)
def test_character_variables_in_multiple_file(self):
# check getting and setting the numpy string matrix in multiple files.
root = nc.open('unittest0*.nc')[0]
var = nc.getvar(root, 'auditTrail')
self.assertEquals(var.shape, (5, 2, 80))
result = np.vstack([[self.auditTrail] for i in range(5)])
self.assertEquals(var, result)
for i in range(5):
result[i, i % 2].data[0:6] = 'CHANGE'
var[i, i % 2, 0:6] = np.array(list('CHANGE'))
self.assertEquals(var, result)
nc.close(root)
# check if it was written to each file.
root = nc.open('unittest0*.nc')[0]
var = nc.getvar(root, 'auditTrail')
self.assertEquals(var, result)
nc.close(root)
def test_get_var_copy_from_source(self):
root = nc.open('unittest0*.nc')[0]
if os.path.isfile('unittest_destiny.nc'):
os.remove('unittest_destiny.nc')
root_d = nc.open('unittest_destiny.nc')[0]
# check that getvar copies a variable from a complex file to a simple file.
var_source = nc.getvar(root, 'data')
var = nc.getvar(root_d, 'data_copy', source=var_source)
self.assertEquals(var, var_source)
# check that getvar copies a variable from a simple file to a complex file.
var_distributed = nc.getvar(root, 'data_copy', source=var)
self.assertEquals(var, var_distributed)
# check that getvar copies with a changed vtype into a simple file.
var_int = nc.getvar(root_d, 'data_int', 'i4', source=var_source)
self.assertEquals(var_source.vtype, 'f4')
self.assertEquals(var_int.vtype, 'i4')
diff = var_source[:] - var_int[:]
self.assertTrue((diff < 1).all())
# check that getvar copies with a changed vtype into a multiple-file set.
var_distributed_int = nc.getvar(root, 'data_int', 'i4', source=var)
self.assertEquals(var_distributed.vtype, 'f4')
self.assertEquals(var_distributed_int.vtype, 'i4')
diff = var_distributed[:] - var_distributed_int[:]
self.assertTrue((diff < 1).all())
if __name__ == '__main__':
tests.base.main()
|
gersolar/netcdf
|
tests/netcdf_test.py
|
Python
|
mit
| 14,336
|
[
"NetCDF"
] |
78ceabc4a80a11e8d3c22bb9ccaf79cd39942f00f9dd4ab851c7e17953e9c4d7
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import json
import os
import unittest
from pymatgen.core.structure import Molecule
from pymatgen.io.nwchem import NwInput, NwInputError, NwOutput, NwTask
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(PymatgenTest.TEST_FILES_DIR, "nwchem")
coords = [
[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000],
]
mol = Molecule(["C", "H", "H", "H", "H"], coords)
class NwTaskTest(unittest.TestCase):
def setUp(self):
self.task = NwTask(
0,
1,
basis_set={"H": "6-31g"},
theory="dft",
theory_directives={"xc": "b3lyp"},
)
self.task_cosmo = NwTask(
0,
1,
basis_set={"H": "6-31g"},
theory="dft",
theory_directives={"xc": "b3lyp"},
alternate_directives={"cosmo": "cosmo"},
)
self.task_esp = NwTask(0, 1, basis_set={"H": "6-31g"}, theory="esp")
def test_multi_bset(self):
t = NwTask.from_molecule(
mol,
theory="dft",
basis_set={"C": "6-311++G**", "H": "6-31++G**"},
theory_directives={"xc": "b3lyp"},
)
ans = """title "H4C1 dft optimize"
charge 0
basis cartesian
C library "6-311++G**"
H library "6-31++G**"
end
dft
xc b3lyp
end
task dft optimize"""
self.assertEqual(str(t), ans)
def test_str_and_from_string(self):
ans = """title "dft optimize"
charge 0
basis cartesian
H library "6-31g"
end
dft
xc b3lyp
end
task dft optimize"""
self.assertEqual(str(self.task), ans)
def test_to_from_dict(self):
d = self.task.as_dict()
t = NwTask.from_dict(d)
self.assertIsInstance(t, NwTask)
def test_init(self):
self.assertRaises(NwInputError, NwTask, 0, 1, {"H": "6-31g"}, theory="bad")
self.assertRaises(NwInputError, NwTask, 0, 1, {"H": "6-31g"}, operation="bad")
def test_dft_task(self):
task = NwTask.dft_task(mol, charge=1, operation="energy")
ans = """title "H4C1 dft energy"
charge 1
basis cartesian
C library "6-31g"
H library "6-31g"
end
dft
mult 2
xc b3lyp
end
task dft energy"""
self.assertEqual(str(task), ans)
def test_dft_cosmo_task(self):
task = NwTask.dft_task(
mol,
charge=mol.charge,
operation="energy",
xc="b3lyp",
basis_set="6-311++G**",
alternate_directives={"cosmo": {"dielec": 78.0}},
)
ans = """title "H4C1 dft energy"
charge 0
basis cartesian
C library "6-311++G**"
H library "6-311++G**"
end
dft
mult 1
xc b3lyp
end
cosmo
dielec 78.0
end
task dft energy"""
self.assertEqual(str(task), ans)
def test_esp_task(self):
task = NwTask.esp_task(mol, charge=mol.charge, operation="", basis_set="6-311++G**")
ans = """title "H4C1 esp "
charge 0
basis cartesian
C library "6-311++G**"
H library "6-311++G**"
end
task esp """
self.assertEqual(str(task), ans)
class NwInputTest(unittest.TestCase):
def setUp(self):
tasks = [
NwTask.dft_task(mol, operation="optimize", xc="b3lyp", basis_set="6-31++G*"),
NwTask.dft_task(mol, operation="freq", xc="b3lyp", basis_set="6-31++G*"),
NwTask.dft_task(mol, operation="energy", xc="b3lyp", basis_set="6-311++G**"),
NwTask.dft_task(
mol,
charge=mol.charge + 1,
operation="energy",
xc="b3lyp",
basis_set="6-311++G**",
),
NwTask.dft_task(
mol,
charge=mol.charge - 1,
operation="energy",
xc="b3lyp",
basis_set="6-311++G**",
),
]
self.nwi = NwInput(
mol,
tasks,
geometry_options=["units", "angstroms", "noautoz"],
memory_options="total 1000 mb",
)
self.nwi_symm = NwInput(
mol,
tasks,
geometry_options=["units", "angstroms", "noautoz"],
symmetry_options=["c1"],
)
def test_str(self):
ans = """memory total 1000 mb
geometry units angstroms noautoz
C 0.0 0.0 0.0
H 0.0 0.0 1.089
H 1.026719 0.0 -0.363
H -0.51336 -0.889165 -0.363
H -0.51336 0.889165 -0.363
end
title "H4C1 dft optimize"
charge 0
basis cartesian
C library "6-31++G*"
H library "6-31++G*"
end
dft
mult 1
xc b3lyp
end
task dft optimize
title "H4C1 dft freq"
charge 0
basis cartesian
C library "6-31++G*"
H library "6-31++G*"
end
dft
mult 1
xc b3lyp
end
task dft freq
title "H4C1 dft energy"
charge 0
basis cartesian
C library "6-311++G**"
H library "6-311++G**"
end
dft
mult 1
xc b3lyp
end
task dft energy
title "H4C1 dft energy"
charge 1
basis cartesian
C library "6-311++G**"
H library "6-311++G**"
end
dft
mult 2
xc b3lyp
end
task dft energy
title "H4C1 dft energy"
charge -1
basis cartesian
C library "6-311++G**"
H library "6-311++G**"
end
dft
mult 2
xc b3lyp
end
task dft energy
"""
self.assertEqual(str(self.nwi), ans)
ans_symm = """geometry units angstroms noautoz
symmetry c1
C 0.0 0.0 0.0
H 0.0 0.0 1.089
H 1.026719 0.0 -0.363
H -0.51336 -0.889165 -0.363
H -0.51336 0.889165 -0.363
end
title "H4C1 dft optimize"
charge 0
basis cartesian
C library "6-31++G*"
H library "6-31++G*"
end
dft
mult 1
xc b3lyp
end
task dft optimize
title "H4C1 dft freq"
charge 0
basis cartesian
C library "6-31++G*"
H library "6-31++G*"
end
dft
mult 1
xc b3lyp
end
task dft freq
title "H4C1 dft energy"
charge 0
basis cartesian
C library "6-311++G**"
H library "6-311++G**"
end
dft
mult 1
xc b3lyp
end
task dft energy
title "H4C1 dft energy"
charge 1
basis cartesian
C library "6-311++G**"
H library "6-311++G**"
end
dft
mult 2
xc b3lyp
end
task dft energy
title "H4C1 dft energy"
charge -1
basis cartesian
C library "6-311++G**"
H library "6-311++G**"
end
dft
mult 2
xc b3lyp
end
task dft energy
"""
self.assertEqual(str(self.nwi_symm), ans_symm)
def test_to_from_dict(self):
d = self.nwi.as_dict()
nwi = NwInput.from_dict(d)
self.assertIsInstance(nwi, NwInput)
# Ensure it is json-serializable.
json.dumps(d)
d = self.nwi_symm.as_dict()
nwi_symm = NwInput.from_dict(d)
self.assertIsInstance(nwi_symm, NwInput)
json.dumps(d)
def test_from_string_and_file(self):
nwi = NwInput.from_file(os.path.join(test_dir, "ch4.nw"))
self.assertEqual(nwi.tasks[0].theory, "dft")
self.assertEqual(nwi.memory_options, "total 1000 mb stack 400 mb")
self.assertEqual(nwi.tasks[0].basis_set["C"], "6-31++G*")
self.assertEqual(nwi.tasks[-1].basis_set["C"], "6-311++G**")
# Try a simplified input.
str_inp = """start H4C1
geometry units angstroms
C 0.0 0.0 0.0
H 0.0 0.0 1.089
H 1.026719 0.0 -0.363
H -0.51336 -0.889165 -0.363
H -0.51336 0.889165 -0.363
end
title "H4C1 dft optimize"
charge 0
basis cartesian
H library "6-31++G*"
C library "6-31++G*"
end
dft
xc b3lyp
mult 1
end
task scf optimize
title "H4C1 dft freq"
charge 0
task scf freq
title "H4C1 dft energy"
charge 0
basis cartesian
H library "6-311++G**"
C library "6-311++G**"
end
task dft energy
title "H4C1 dft energy"
charge 1
dft
xc b3lyp
mult 2
end
task dft energy
title "H4C1 dft energy"
charge -1
task dft energy
"""
nwi = NwInput.from_string(str_inp)
self.assertEqual(nwi.geometry_options, ["units", "angstroms"])
self.assertEqual(nwi.tasks[0].theory, "scf")
self.assertEqual(nwi.tasks[0].basis_set["C"], "6-31++G*")
self.assertEqual(nwi.tasks[-1].theory, "dft")
self.assertEqual(nwi.tasks[-1].basis_set["C"], "6-311++G**")
str_inp_symm = str_inp.replace("geometry units angstroms", "geometry units angstroms\n symmetry c1")
nwi_symm = NwInput.from_string(str_inp_symm)
self.assertEqual(nwi_symm.geometry_options, ["units", "angstroms"])
self.assertEqual(nwi_symm.symmetry_options, ["c1"])
self.assertEqual(nwi_symm.tasks[0].theory, "scf")
self.assertEqual(nwi_symm.tasks[0].basis_set["C"], "6-31++G*")
self.assertEqual(nwi_symm.tasks[-1].theory, "dft")
self.assertEqual(nwi_symm.tasks[-1].basis_set["C"], "6-311++G**")
class NwOutputTest(unittest.TestCase):
def test_read(self):
nwo = NwOutput(os.path.join(test_dir, "CH4.nwout"))
nwo_cosmo = NwOutput(os.path.join(test_dir, "N2O4.nwout"))
self.assertEqual(0, nwo[0]["charge"])
self.assertEqual(-1, nwo[-1]["charge"])
self.assertEqual(len(nwo), 5)
self.assertAlmostEqual(-1102.6224491715582, nwo[0]["energies"][-1], 2)
self.assertAlmostEqual(-1102.9986291578023, nwo[2]["energies"][-1], 3)
self.assertAlmostEqual(-11156.354030653656, nwo_cosmo[5]["energies"][0]["cosmo scf"], 3)
self.assertAlmostEqual(-11153.374133394364, nwo_cosmo[5]["energies"][0]["gas phase"], 3)
self.assertAlmostEqual(-11156.353632962995, nwo_cosmo[5]["energies"][0]["sol phase"], 2)
self.assertAlmostEqual(-11168.818934311605, nwo_cosmo[6]["energies"][0]["cosmo scf"], 2)
self.assertAlmostEqual(-11166.3624424611462, nwo_cosmo[6]["energies"][0]["gas phase"], 2)
self.assertAlmostEqual(-11168.818934311605, nwo_cosmo[6]["energies"][0]["sol phase"], 2)
self.assertAlmostEqual(-11165.227959110889, nwo_cosmo[7]["energies"][0]["cosmo scf"], 2)
self.assertAlmostEqual(-11165.025443612385, nwo_cosmo[7]["energies"][0]["gas phase"], 2)
self.assertAlmostEqual(-11165.227959110154, nwo_cosmo[7]["energies"][0]["sol phase"], 2)
self.assertAlmostEqual(nwo[1]["hessian"][0][0], 4.60187e01)
self.assertAlmostEqual(nwo[1]["hessian"][1][2], -1.14030e-08)
self.assertAlmostEqual(nwo[1]["hessian"][2][3], 2.60819e01)
self.assertAlmostEqual(nwo[1]["hessian"][6][6], 1.45055e02)
self.assertAlmostEqual(nwo[1]["hessian"][11][14], 1.35078e01)
# CH4.nwout, line 722
self.assertAlmostEqual(nwo[0]["forces"][0][3], -0.001991)
# N2O4.nwout, line 1071
self.assertAlmostEqual(nwo_cosmo[0]["forces"][0][4], 0.011948)
# There should be four DFT gradients.
self.assertEqual(len(nwo_cosmo[0]["forces"]), 4)
ie = nwo[4]["energies"][-1] - nwo[2]["energies"][-1]
ea = nwo[2]["energies"][-1] - nwo[3]["energies"][-1]
self.assertAlmostEqual(0.7575358648355177, ie)
self.assertAlmostEqual(-14.997877958701338, ea, 3)
self.assertEqual(nwo[4]["basis_set"]["C"]["description"], "6-311++G**")
nwo = NwOutput(os.path.join(test_dir, "H4C3O3_1.nwout"))
self.assertTrue(nwo[-1]["has_error"])
self.assertEqual(nwo[-1]["errors"][0], "Bad convergence")
nwo = NwOutput(os.path.join(test_dir, "CH3CH2O.nwout"))
self.assertTrue(nwo[-1]["has_error"])
self.assertEqual(nwo[-1]["errors"][0], "Bad convergence")
nwo = NwOutput(os.path.join(test_dir, "C1N1Cl1_1.nwout"))
self.assertTrue(nwo[-1]["has_error"])
self.assertEqual(nwo[-1]["errors"][0], "autoz error")
nwo = NwOutput(os.path.join(test_dir, "anthrachinon_wfs_16_ethyl.nwout"))
self.assertTrue(nwo[-1]["has_error"])
self.assertEqual(nwo[-1]["errors"][0], "Geometry optimization failed")
nwo = NwOutput(os.path.join(test_dir, "anthrachinon_wfs_15_carboxyl.nwout"))
self.assertEqual(nwo[1]["frequencies"][0][0], -70.47)
self.assertEqual(len(nwo[1]["frequencies"][0][1]), 27)
self.assertEqual(nwo[1]["frequencies"][-1][0], 3696.74)
self.assertEqual(nwo[1]["frequencies"][-1][1][-1], (0.20498, -0.94542, -0.00073))
self.assertEqual(nwo[1]["normal_frequencies"][1][0], -70.72)
self.assertEqual(nwo[1]["normal_frequencies"][3][0], -61.92)
self.assertEqual(nwo[1]["normal_frequencies"][1][1][-1], (0.00056, 0.00042, 0.06781))
def test_parse_tddft(self):
nwo = NwOutput(os.path.join(test_dir, "phen_tddft.log"))
roots = nwo.parse_tddft()
self.assertEqual(len(roots["singlet"]), 20)
self.assertAlmostEqual(roots["singlet"][0]["energy"], 3.9291)
self.assertAlmostEqual(roots["singlet"][0]["osc_strength"], 0.0)
self.assertAlmostEqual(roots["singlet"][1]["osc_strength"], 0.00177)
def test_get_excitation_spectrum(self):
nwo = NwOutput(os.path.join(test_dir, "phen_tddft.log"))
spectrum = nwo.get_excitation_spectrum()
self.assertEqual(len(spectrum.x), 2000)
self.assertAlmostEqual(spectrum.x[0], 1.9291)
self.assertAlmostEqual(spectrum.y[0], 0.0)
self.assertAlmostEqual(spectrum.y[1000], 0.0007423569947114812)
if __name__ == "__main__":
unittest.main()
|
vorwerkc/pymatgen
|
pymatgen/io/tests/test_nwchem.py
|
Python
|
mit
| 13,075
|
[
"NWChem",
"pymatgen"
] |
b33f5dfa35ab8fca82d8f5ba91c6245821db60dd9b3fd2c370290714e6ece2b1
|
from typing import Any, Dict
EMOJI_NAME_MAPS: Dict[str, Dict[str, Any]] = {
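# Maps each emoji's Unicode codepoint (a lowercase hex string) to its
# canonical name plus any extra aliases.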
# seems like best emoji for happy
"1f600": {"canonical_name": "grinning", "aliases": ["happy"]},
"1f603": {"canonical_name": "smiley", "aliases": []},
# the Google emoji for this is not great, so made People/9 'smile' and
# renamed this one
"1f604": {"canonical_name": "big_smile", "aliases": []},
# from gemoji/Unicode
"1f601": {"canonical_name": "grinning_face_with_smiling_eyes", "aliases": []},
# satisfied doesn't seem like a good description of these images
"1f606": {"canonical_name": "laughing", "aliases": ["lol"]},
"1f605": {"canonical_name": "sweat_smile", "aliases": []},
# laughter_tears from https://beebom.com/emoji-meanings/
"1f602": {"canonical_name": "joy", "aliases": ["tears", "laughter_tears"]},
"1f923": {"canonical_name": "rolling_on_the_floor_laughing", "aliases": ["rofl"]},
# not sure how the glyphs match relaxed, but both iamcal and gemoji have it
"263a": {"canonical_name": "smiling_face", "aliases": ["relaxed"]},
"1f60a": {"canonical_name": "blush", "aliases": []},
# halo comes from gemoji/Unicode
"1f607": {"canonical_name": "innocent", "aliases": ["halo"]},
"1f642": {"canonical_name": "smile", "aliases": []},
"1f643": {"canonical_name": "upside_down", "aliases": ["oops"]},
"1f609": {"canonical_name": "wink", "aliases": []},
"1f60c": {"canonical_name": "relieved", "aliases": []},
# in_love from https://beebom.com/emoji-meanings/
"1f60d": {"canonical_name": "heart_eyes", "aliases": ["in_love"]},
# blow_a_kiss from https://beebom.com/emoji-meanings/
"1f618": {"canonical_name": "heart_kiss", "aliases": ["blow_a_kiss"]},
"1f617": {"canonical_name": "kiss", "aliases": []},
"1f619": {"canonical_name": "kiss_smiling_eyes", "aliases": []},
"1f61a": {"canonical_name": "kiss_with_blush", "aliases": []},
"1f60b": {"canonical_name": "yum", "aliases": []},
# crazy from https://beebom.com/emoji-meanings/, seems like best emoji for
# joking
"1f61b": {"canonical_name": "stuck_out_tongue", "aliases": ["mischievous"]},
"1f61c": {"canonical_name": "stuck_out_tongue_wink", "aliases": ["joking", "crazy"]},
"1f61d": {"canonical_name": "stuck_out_tongue_closed_eyes", "aliases": []},
# kaching suggested by user
"1f911": {"canonical_name": "money_face", "aliases": ["kaching"]},
# arms_open seems like a natural addition
"1f917": {"canonical_name": "hug", "aliases": ["arms_open"]},
"1f913": {"canonical_name": "nerd", "aliases": ["geek"]},
# several sites suggested this was used for "cool", but cool is taken by
# Symbols/137
"1f60e": {"canonical_name": "sunglasses", "aliases": []},
"1f921": {"canonical_name": "clown", "aliases": []},
"1f920": {"canonical_name": "cowboy", "aliases": []},
# https://emojipedia.org/smirking-face/
"1f60f": {"canonical_name": "smirk", "aliases": ["smug"]},
"1f612": {"canonical_name": "unamused", "aliases": []},
"1f61e": {"canonical_name": "disappointed", "aliases": []},
# see People/41
"1f614": {"canonical_name": "pensive", "aliases": ["tired"]},
"1f61f": {"canonical_name": "worried", "aliases": []},
# these seem to better capture the glyphs. This is also what :/ turns into
# in Google Hangouts
"1f615": {"canonical_name": "oh_no", "aliases": ["half_frown", "concerned", "confused"]},
"1f641": {"canonical_name": "frown", "aliases": ["slight_frown"]},
# sad seemed better than putting another frown as the primary name (see
# People/37)
"2639": {"canonical_name": "sad", "aliases": ["big_frown"]},
# helpless from https://emojipedia.org/persevering-face/
"1f623": {"canonical_name": "persevere", "aliases": ["helpless"]},
# agony seemed like a good addition
"1f616": {"canonical_name": "confounded", "aliases": ["agony"]},
# tired doesn't really match any of the 4 images, put it on People/34
"1f62b": {"canonical_name": "anguish", "aliases": []},
# distraught from https://beebom.com/emoji-meanings/
"1f629": {"canonical_name": "weary", "aliases": ["distraught"]},
"1f624": {"canonical_name": "triumph", "aliases": []},
"1f620": {"canonical_name": "angry", "aliases": []},
# mad and grumpy from https://beebom.com/emoji-meanings/, very_angry to
# parallel People/44 and show up in typeahead for "ang.."
"1f621": {"canonical_name": "rage", "aliases": ["mad", "grumpy", "very_angry"]},
# blank from https://beebom.com/emoji-meanings/, speechless and poker_face
# seemed like good ideas for this
"1f636": {"canonical_name": "speechless", "aliases": ["no_mouth", "blank", "poker_face"]},
"1f610": {"canonical_name": "neutral", "aliases": []},
"1f611": {"canonical_name": "expressionless", "aliases": []},
"1f62f": {"canonical_name": "hushed", "aliases": []},
"1f626": {"canonical_name": "frowning", "aliases": []},
# pained from https://beebom.com/emoji-meanings/
"1f627": {"canonical_name": "anguished", "aliases": ["pained"]},
# surprise from https://emojipedia.org/face-with-open-mouth/
"1f62e": {"canonical_name": "open_mouth", "aliases": ["surprise"]},
"1f632": {"canonical_name": "astonished", "aliases": []},
"1f635": {"canonical_name": "dizzy", "aliases": []},
# the alternates are from https://emojipedia.org/flushed-face/. shame
# doesn't work with the Google emoji
"1f633": {"canonical_name": "flushed", "aliases": ["embarrassed", "blushing"]},
"1f631": {"canonical_name": "scream", "aliases": []},
# scared from https://emojipedia.org/fearful-face/, shock seemed like a
# nice addition
"1f628": {"canonical_name": "fear", "aliases": ["scared", "shock"]},
"1f630": {"canonical_name": "cold_sweat", "aliases": []},
"1f622": {"canonical_name": "cry", "aliases": []},
# stressed from https://beebom.com/emoji-meanings/. The internet generally
# didn't seem to know what to make of the disappointed_relieved name, and I
# got the sense it wasn't an emotion that was often used. Hence replaced it
# with exhausted.
"1f625": {"canonical_name": "exhausted", "aliases": ["disappointed_relieved", "stressed"]},
"1f924": {"canonical_name": "drooling", "aliases": []},
"1f62d": {"canonical_name": "sob", "aliases": []},
"1f613": {"canonical_name": "sweat", "aliases": []},
"1f62a": {"canonical_name": "sleepy", "aliases": []},
"1f634": {"canonical_name": "sleeping", "aliases": []},
"1f644": {"canonical_name": "rolling_eyes", "aliases": []},
"1f914": {"canonical_name": "thinking", "aliases": []},
"1f925": {"canonical_name": "lying", "aliases": []},
# seems like best emoji for nervous/anxious
"1f62c": {"canonical_name": "grimacing", "aliases": ["nervous", "anxious"]},
# zip_it from https://mashable.com/2015/10/23/ios-9-1-emoji-guide/,
# lips_sealed from https://emojipedia.org/zipper-mouth-face/, rest seemed
# like reasonable additions
"1f910": {
"canonical_name": "silence",
"aliases": ["quiet", "hush", "zip_it", "lips_are_sealed"],
},
# queasy seemed like a natural addition
"1f922": {"canonical_name": "nauseated", "aliases": ["queasy"]},
"1f927": {"canonical_name": "sneezing", "aliases": []},
"1f637": {"canonical_name": "mask", "aliases": []},
# flu from https://mashable.com/2015/10/23/ios-9-1-emoji-guide/, sick from
# https://emojipedia.org/face-with-thermometer/, face_with_thermometer so
# it shows up in typeahead (thermometer taken by Objects/82)
"1f912": {
"canonical_name": "sick",
"aliases": ["flu", "face_with_thermometer", "ill", "fever"],
},
# hurt and injured from https://beebom.com/emoji-meanings/. Chose hurt as
# primary since I think it can cover a wider set of things (e.g. emotional
# hurt)
"1f915": {"canonical_name": "hurt", "aliases": ["head_bandage", "injured"]},
# devil from https://emojipedia.org/smiling-face-with-horns/,
# smiling_face_with_horns from gemoji/Unicode
"1f608": {
"canonical_name": "smiling_devil",
"aliases": ["smiling_imp", "smiling_face_with_horns"],
},
# angry_devil from https://beebom.com/emoji-meanings/
"1f47f": {"canonical_name": "devil", "aliases": ["imp", "angry_devil"]},
"1f479": {"canonical_name": "ogre", "aliases": []},
"1f47a": {"canonical_name": "goblin", "aliases": []},
# pile_of_poo from gemoji/Unicode
"1f4a9": {"canonical_name": "poop", "aliases": ["pile_of_poo"]},
# alternates seemed like reasonable additions
"1f47b": {"canonical_name": "ghost", "aliases": ["boo", "spooky", "haunted"]},
"1f480": {"canonical_name": "skull", "aliases": []},
# alternates seemed like reasonable additions
"2620": {
"canonical_name": "skull_and_crossbones",
"aliases": ["pirate", "death", "hazard", "toxic", "poison"],
},
# ufo seemed like a natural addition
"1f47d": {"canonical_name": "alien", "aliases": ["ufo"]},
"1f47e": {"canonical_name": "space_invader", "aliases": []},
"1f916": {"canonical_name": "robot", "aliases": []},
# pumpkin seemed like a natural addition
"1f383": {"canonical_name": "jack-o-lantern", "aliases": ["pumpkin"]},
"1f63a": {"canonical_name": "smiley_cat", "aliases": []},
"1f638": {"canonical_name": "smile_cat", "aliases": []},
"1f639": {"canonical_name": "joy_cat", "aliases": []},
"1f63b": {"canonical_name": "heart_eyes_cat", "aliases": []},
# smug_cat to parallel People/31
"1f63c": {"canonical_name": "smirk_cat", "aliases": ["smug_cat"]},
"1f63d": {"canonical_name": "kissing_cat", "aliases": []},
# weary_cat from Unicode/gemoji
"1f640": {"canonical_name": "scream_cat", "aliases": ["weary_cat"]},
"1f63f": {"canonical_name": "crying_cat", "aliases": []},
# angry_cat to better parallel People/45
"1f63e": {"canonical_name": "angry_cat", "aliases": ["pouting_cat"]},
"1f450": {"canonical_name": "open_hands", "aliases": []},
# praise from
# https://emojipedia.org/person-raising-both-hands-in-celebration/
"1f64c": {"canonical_name": "raised_hands", "aliases": ["praise"]},
# applause from https://emojipedia.org/clapping-hands-sign/
"1f44f": {"canonical_name": "clap", "aliases": ["applause"]},
# welcome and thank_you from
# https://emojipedia.org/person-with-folded-hands/, namaste from Indian
# culture
"1f64f": {"canonical_name": "pray", "aliases": ["welcome", "thank_you", "namaste"]},
# done_deal seems like a natural addition
"1f91d": {"canonical_name": "handshake", "aliases": ["done_deal"]},
"1f44d": {"canonical_name": "+1", "aliases": ["thumbs_up", "like"]},
"1f44e": {"canonical_name": "-1", "aliases": ["thumbs_down"]},
# fist_bump from https://beebom.com/emoji-meanings/
"1f44a": {"canonical_name": "fist_bump", "aliases": ["punch"]},
# used as power in social justice movements
"270a": {"canonical_name": "fist", "aliases": ["power"]},
"1f91b": {"canonical_name": "left_fist", "aliases": []},
"1f91c": {"canonical_name": "right_fist", "aliases": []},
"1f91e": {"canonical_name": "fingers_crossed", "aliases": []},
# seems to be mostly used as peace on twitter
"270c": {"canonical_name": "peace_sign", "aliases": ["victory"]},
# https://emojipedia.org/sign-of-the-horns/
"1f918": {"canonical_name": "rock_on", "aliases": ["sign_of_the_horns"]},
# got_it seems like a natural addition
"1f44c": {"canonical_name": "ok", "aliases": ["got_it"]},
"1f448": {"canonical_name": "point_left", "aliases": []},
"1f449": {"canonical_name": "point_right", "aliases": []},
# :this: is a way of emphasizing the previous message. point_up instead of
# point_up_2 so that point_up better matches the other point_*s
"1f446": {"canonical_name": "point_up", "aliases": ["this"]},
"1f447": {"canonical_name": "point_down", "aliases": []},
# People/114 is point_up. These seemed better than naming it point_up_2,
# and point_of_information means it will come up in typeahead for 'point'
"261d": {
"canonical_name": "wait_one_second",
"aliases": ["point_of_information", "asking_a_question"],
},
"270b": {"canonical_name": "hand", "aliases": ["raised_hand"]},
# seems like the best emoji for stop, raised_back_of_hand doesn't seem that
# useful
"1f91a": {"canonical_name": "stop", "aliases": []},
# seems like the best emoji for high_five, raised_hand_with_fingers_splayed
# doesn't seem that useful
"1f590": {"canonical_name": "high_five", "aliases": ["palm"]},
# https://mashable.com/2015/10/23/ios-9-1-emoji-guide/
"1f596": {"canonical_name": "spock", "aliases": ["live_long_and_prosper"]},
# People/119 is a better 'hi', but 'hi' will never show up in the typeahead
# due to 'high_five'
"1f44b": {"canonical_name": "wave", "aliases": ["hello", "hi"]},
"1f919": {"canonical_name": "call_me", "aliases": []},
# flexed_biceps from gemoji/Unicode, strong seemed like a good addition
"1f4aa": {"canonical_name": "muscle", "aliases": []},
"1f595": {"canonical_name": "middle_finger", "aliases": []},
"270d": {"canonical_name": "writing", "aliases": []},
"1f933": {"canonical_name": "selfie", "aliases": []},
# Couldn't figure out why iamcal chose nail_care. Unicode uses nail_polish,
# gemoji uses both
"1f485": {"canonical_name": "nail_polish", "aliases": ["nail_care"]},
"1f48d": {"canonical_name": "ring", "aliases": []},
"1f484": {"canonical_name": "lipstick", "aliases": []},
# People/18 seems like a better kiss for most circumstances
"1f48b": {"canonical_name": "lipstick_kiss", "aliases": []},
# mouth from gemoji/Unicode
"1f444": {"canonical_name": "lips", "aliases": ["mouth"]},
"1f445": {"canonical_name": "tongue", "aliases": []},
"1f442": {"canonical_name": "ear", "aliases": []},
"1f443": {"canonical_name": "nose", "aliases": []},
# seems a better "feet" than Nature/86 (paw_prints)
"1f463": {"canonical_name": "footprints", "aliases": ["feet"]},
"1f441": {"canonical_name": "eye", "aliases": []},
# seemed the best emoji for looking
"1f440": {"canonical_name": "eyes", "aliases": ["looking"]},
"1f5e3": {"canonical_name": "speaking_head", "aliases": []},
# shadow seems like a good addition
"1f464": {"canonical_name": "silhouette", "aliases": ["shadow"]},
# to parallel People/139
"1f465": {"canonical_name": "silhouettes", "aliases": ["shadows"]},
"1f476": {"canonical_name": "baby", "aliases": []},
"1f466": {"canonical_name": "boy", "aliases": []},
"1f467": {"canonical_name": "girl", "aliases": []},
"1f468": {"canonical_name": "man", "aliases": []},
"1f469": {"canonical_name": "woman", "aliases": []},
# It's used on twitter a bunch, either when showing off hair, or in a way
# where People/144 would substitute. It'd be nice if there were another
# emoji one could use for "good hair", but I think it's not a big loss to
# not have one for Zulip, and not worth the eurocentrism.
# '1f471': {'canonical_name': 'X', 'aliases': ['person_with_blond_hair']},
# Added elderly since I think some people prefer that term
"1f474": {"canonical_name": "older_man", "aliases": ["elderly_man"]},
# Added elderly since I think some people prefer that term
"1f475": {"canonical_name": "older_woman", "aliases": ["elderly_woman"]},
"1f472": {"canonical_name": "gua_pi_mao", "aliases": []},
"1f473": {"canonical_name": "turban", "aliases": []},
# police seems like a more polite term, and matches the Unicode
"1f46e": {"canonical_name": "police", "aliases": ["cop"]},
"1f477": {"canonical_name": "construction_worker", "aliases": []},
"1f482": {"canonical_name": "guard", "aliases": []},
# detective from gemoji, sneaky from
# https://mashable.com/2015/10/23/ios-9-1-emoji-guide/, agent seems a
# reasonable addition
"1f575": {"canonical_name": "detective", "aliases": ["spy", "sleuth", "agent", "sneaky"]},
# mrs_claus from https://emojipedia.org/mother-christmas/
"1f936": {"canonical_name": "mother_christmas", "aliases": ["mrs_claus"]},
"1f385": {"canonical_name": "santa", "aliases": []},
"1f478": {"canonical_name": "princess", "aliases": []},
"1f934": {"canonical_name": "prince", "aliases": []},
"1f470": {"canonical_name": "bride", "aliases": []},
"1f935": {"canonical_name": "tuxedo", "aliases": []},
"1f47c": {"canonical_name": "angel", "aliases": []},
# expecting seems like a good addition
"1f930": {"canonical_name": "pregnant", "aliases": ["expecting"]},
"1f647": {"canonical_name": "bow", "aliases": []},
# mostly used sassily. person_tipping_hand from
# https://emojipedia.org/information-desk-person/
"1f481": {"canonical_name": "information_desk_person", "aliases": ["person_tipping_hand"]},
# no_signal to parallel People/207. Nope seems like a reasonable addition
"1f645": {"canonical_name": "no_signal", "aliases": ["nope"]},
"1f646": {"canonical_name": "ok_signal", "aliases": []},
# pick_me seems like a good addition
"1f64b": {"canonical_name": "raising_hand", "aliases": ["pick_me"]},
"1f926": {"canonical_name": "face_palm", "aliases": []},
"1f937": {"canonical_name": "shrug", "aliases": []},
"1f64e": {"canonical_name": "person_pouting", "aliases": []},
"1f64d": {"canonical_name": "person_frowning", "aliases": []},
"1f487": {"canonical_name": "haircut", "aliases": []},
"1f486": {"canonical_name": "massage", "aliases": []},
# hover seems like a reasonable addition
"1f574": {"canonical_name": "levitating", "aliases": ["hover"]},
"1f483": {"canonical_name": "dancer", "aliases": []},
"1f57a": {"canonical_name": "dancing", "aliases": ["disco"]},
"1f46f": {"canonical_name": "dancers", "aliases": []},
# pedestrian seems like a reasonable addition
"1f6b6": {"canonical_name": "walking", "aliases": ["pedestrian"]},
"1f3c3": {"canonical_name": "running", "aliases": ["runner"]},
"1f46b": {"canonical_name": "man_and_woman_holding_hands", "aliases": ["man_and_woman_couple"]},
# to parallel People/234
"1f46d": {"canonical_name": "two_women_holding_hands", "aliases": ["women_couple"]},
# to parallel People/234
"1f46c": {"canonical_name": "two_men_holding_hands", "aliases": ["men_couple"]},
# no need for man-woman-boy, since we aren't including the other family
# combos
"1f46a": {"canonical_name": "family", "aliases": []},
"1f45a": {"canonical_name": "clothing", "aliases": []},
"1f455": {"canonical_name": "shirt", "aliases": ["tshirt"]},
# denim seems like a good addition
"1f456": {"canonical_name": "jeans", "aliases": ["denim"]},
# tie is shorter, and a bit more general
"1f454": {"canonical_name": "tie", "aliases": []},
"1f457": {"canonical_name": "dress", "aliases": []},
"1f459": {"canonical_name": "bikini", "aliases": []},
"1f458": {"canonical_name": "kimono", "aliases": []},
# I feel like this is always used in the plural
"1f460": {"canonical_name": "high_heels", "aliases": []},
# flip_flops seems like a reasonable addition
"1f461": {"canonical_name": "sandal", "aliases": ["flip_flops"]},
"1f462": {"canonical_name": "boot", "aliases": []},
"1f45e": {"canonical_name": "shoe", "aliases": []},
# running_shoe is from gemoji, sneaker seems like a reasonable addition
"1f45f": {"canonical_name": "athletic_shoe", "aliases": ["sneaker", "running_shoe"]},
"1f452": {"canonical_name": "hat", "aliases": []},
"1f3a9": {"canonical_name": "top_hat", "aliases": []},
# graduate seems like a better word for this
"1f393": {"canonical_name": "graduate", "aliases": ["mortar_board"]},
# king and queen seem like good additions
"1f451": {"canonical_name": "crown", "aliases": ["queen", "king"]},
# safety and invincibility inspired by
# https://mashable.com/2015/10/23/ios-9-1-emoji-guide/. hard_hat and
# rescue_worker seem like good additions
"26d1": {
"canonical_name": "helmet",
"aliases": ["hard_hat", "rescue_worker", "safety_first", "invincible"],
},
# backpack from gemoji, dominates satchel on Google Trends
"1f392": {"canonical_name": "backpack", "aliases": ["satchel"]},
"1f45d": {"canonical_name": "pouch", "aliases": []},
"1f45b": {"canonical_name": "purse", "aliases": []},
"1f45c": {"canonical_name": "handbag", "aliases": []},
"1f4bc": {"canonical_name": "briefcase", "aliases": []},
# glasses seems a more common term than eyeglasses, spectacles seems like a
# reasonable synonym to add
"1f453": {"canonical_name": "glasses", "aliases": ["spectacles"]},
"1f576": {"canonical_name": "dark_sunglasses", "aliases": []},
"1f302": {"canonical_name": "closed_umbrella", "aliases": []},
"2602": {"canonical_name": "umbrella", "aliases": []},
# Some animals have a Unicode codepoint "<animal>", some have a codepoint
# "<animal> face", and some have both. If an animal has just a single
# codepoint, we call it <animal>, regardless of what the codepoint is. If
# an animal has both, we call the "<animal>" codepoint <animal>, and come
# up with something else useful-seeming for the "<animal> face" codepoint.
# The reason we chose "<animal> face" for the non-standard name (instead of
# giving "<animal>" the non-standard name, as iamcal does) is because the
# Apple emoji for the "<animal>"s are too realistic. E.g. Apple's Nature/76
# is less plausibly a puppy than this one.
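# For example, in the entries just below: 1f436 (the "dog face" codepoint)
# is named puppy while 1f415 ("dog") is simply dog, and 1f431 ("cat face")
# is kitten while 1f408 ("cat") is cat.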
"1f436": {"canonical_name": "puppy", "aliases": []},
"1f431": {"canonical_name": "kitten", "aliases": []},
"1f42d": {"canonical_name": "dormouse", "aliases": []},
"1f439": {"canonical_name": "hamster", "aliases": []},
"1f430": {"canonical_name": "bunny", "aliases": []},
"1f98a": {"canonical_name": "fox", "aliases": []},
"1f43b": {"canonical_name": "bear", "aliases": []},
"1f43c": {"canonical_name": "panda", "aliases": []},
"1f428": {"canonical_name": "koala", "aliases": []},
"1f42f": {"canonical_name": "tiger_cub", "aliases": []},
"1f981": {"canonical_name": "lion", "aliases": []},
"1f42e": {"canonical_name": "calf", "aliases": []},
"1f437": {"canonical_name": "piglet", "aliases": []},
"1f43d": {"canonical_name": "pig_nose", "aliases": []},
"1f438": {"canonical_name": "frog", "aliases": []},
"1f435": {"canonical_name": "monkey_face", "aliases": []},
"1f648": {"canonical_name": "see_no_evil", "aliases": []},
"1f649": {"canonical_name": "hear_no_evil", "aliases": []},
"1f64a": {"canonical_name": "speak_no_evil", "aliases": []},
"1f412": {"canonical_name": "monkey", "aliases": []},
# cluck seemed like a good addition
"1f414": {"canonical_name": "chicken", "aliases": ["cluck"]},
"1f427": {"canonical_name": "penguin", "aliases": []},
"1f426": {"canonical_name": "bird", "aliases": []},
"1f424": {"canonical_name": "chick", "aliases": ["baby_chick"]},
"1f423": {"canonical_name": "hatching", "aliases": ["hatching_chick"]},
# https://www.iemoji.com/view/emoji/668/animals-nature/front-facing-baby-chick
"1f425": {"canonical_name": "new_baby", "aliases": []},
"1f986": {"canonical_name": "duck", "aliases": []},
"1f985": {"canonical_name": "eagle", "aliases": []},
"1f989": {"canonical_name": "owl", "aliases": []},
"1f987": {"canonical_name": "bat", "aliases": []},
"1f43a": {"canonical_name": "wolf", "aliases": []},
"1f417": {"canonical_name": "boar", "aliases": []},
"1f434": {"canonical_name": "pony", "aliases": []},
"1f984": {"canonical_name": "unicorn", "aliases": []},
# buzz seemed like a reasonable addition
"1f41d": {"canonical_name": "bee", "aliases": ["buzz", "honeybee"]},
# caterpillar seemed like a reasonable addition
"1f41b": {"canonical_name": "bug", "aliases": ["caterpillar"]},
"1f98b": {"canonical_name": "butterfly", "aliases": []},
"1f40c": {"canonical_name": "snail", "aliases": []},
# spiral_shell from Unicode/gemoji, the others seemed like reasonable
# additions
"1f41a": {"canonical_name": "shell", "aliases": ["seashell", "conch", "spiral_shell"]},
# Unicode/gemoji have lady_beetle; hopefully with ladybug we get both the
# people that prefer lady_beetle (with beetle) and ladybug. There is also
# ladybird, but it seems a bit much for this emoji to complete for "bird".
"1f41e": {"canonical_name": "beetle", "aliases": ["ladybug"]},
"1f41c": {"canonical_name": "ant", "aliases": []},
"1f577": {"canonical_name": "spider", "aliases": []},
"1f578": {"canonical_name": "web", "aliases": ["spider_web"]},
# tortoise seemed like a reasonable addition
"1f422": {"canonical_name": "turtle", "aliases": ["tortoise"]},
# put in a few animal sounds, including this one
"1f40d": {"canonical_name": "snake", "aliases": ["hiss"]},
"1f98e": {"canonical_name": "lizard", "aliases": ["gecko"]},
"1f982": {"canonical_name": "scorpion", "aliases": []},
"1f980": {"canonical_name": "crab", "aliases": []},
"1f991": {"canonical_name": "squid", "aliases": []},
"1f419": {"canonical_name": "octopus", "aliases": []},
"1f990": {"canonical_name": "shrimp", "aliases": []},
"1f420": {"canonical_name": "tropical_fish", "aliases": []},
"1f41f": {"canonical_name": "fish", "aliases": []},
"1f421": {"canonical_name": "blowfish", "aliases": []},
"1f42c": {"canonical_name": "dolphin", "aliases": ["flipper"]},
"1f988": {"canonical_name": "shark", "aliases": []},
"1f433": {"canonical_name": "whale", "aliases": []},
# https://emojipedia.org/whale/
"1f40b": {"canonical_name": "humpback_whale", "aliases": []},
"1f40a": {"canonical_name": "crocodile", "aliases": []},
"1f406": {"canonical_name": "leopard", "aliases": []},
"1f405": {"canonical_name": "tiger", "aliases": []},
"1f403": {"canonical_name": "water_buffalo", "aliases": []},
"1f402": {"canonical_name": "ox", "aliases": ["bull"]},
"1f404": {"canonical_name": "cow", "aliases": []},
"1f98c": {"canonical_name": "deer", "aliases": []},
# https://emojipedia.org/dromedary-camel/
"1f42a": {"canonical_name": "arabian_camel", "aliases": []},
"1f42b": {"canonical_name": "camel", "aliases": []},
"1f418": {"canonical_name": "elephant", "aliases": []},
"1f98f": {"canonical_name": "rhinoceros", "aliases": []},
"1f98d": {"canonical_name": "gorilla", "aliases": []},
"1f40e": {"canonical_name": "horse", "aliases": []},
"1f416": {"canonical_name": "pig", "aliases": ["oink"]},
"1f410": {"canonical_name": "goat", "aliases": []},
"1f40f": {"canonical_name": "ram", "aliases": []},
"1f411": {"canonical_name": "sheep", "aliases": ["baa"]},
"1f415": {"canonical_name": "dog", "aliases": ["woof"]},
"1f429": {"canonical_name": "poodle", "aliases": []},
"1f408": {"canonical_name": "cat", "aliases": ["meow"]},
# alarm seemed like a fun addition
"1f413": {"canonical_name": "rooster", "aliases": ["alarm", "cock-a-doodle-doo"]},
"1f983": {"canonical_name": "turkey", "aliases": []},
"1f54a": {"canonical_name": "dove", "aliases": ["dove_of_peace"]},
"1f407": {"canonical_name": "rabbit", "aliases": []},
"1f401": {"canonical_name": "mouse", "aliases": []},
"1f400": {"canonical_name": "rat", "aliases": []},
"1f43f": {"canonical_name": "chipmunk", "aliases": []},
# paws seemed like a reasonable addition. Put feet at People/135
"1f43e": {"canonical_name": "paw_prints", "aliases": ["paws"]},
"1f409": {"canonical_name": "dragon", "aliases": []},
"1f432": {"canonical_name": "dragon_face", "aliases": []},
"1f335": {"canonical_name": "cactus", "aliases": []},
"1f384": {"canonical_name": "holiday_tree", "aliases": []},
"1f332": {"canonical_name": "evergreen_tree", "aliases": []},
"1f333": {"canonical_name": "tree", "aliases": ["deciduous_tree"]},
"1f334": {"canonical_name": "palm_tree", "aliases": []},
# sprout seemed like a reasonable addition
"1f331": {"canonical_name": "seedling", "aliases": ["sprout"]},
# seemed like the best emoji for plant
"1f33f": {"canonical_name": "herb", "aliases": ["plant"]},
# clover seemed like a reasonable addition
"2618": {"canonical_name": "shamrock", "aliases": ["clover"]},
# lucky seems more useful
"1f340": {"canonical_name": "lucky", "aliases": ["four_leaf_clover"]},
"1f38d": {"canonical_name": "bamboo", "aliases": []},
# https://emojipedia.org/tanabata-tree/
"1f38b": {"canonical_name": "wish_tree", "aliases": ["tanabata_tree"]},
# seemed like good additions. Used fall instead of autumn, since we don't
# have the rest of the seasons, and I could imagine someone using both
# meanings of fall.
"1f343": {"canonical_name": "leaves", "aliases": ["wind", "fall"]},
"1f342": {"canonical_name": "fallen_leaf", "aliases": []},
"1f341": {"canonical_name": "maple_leaf", "aliases": []},
"1f344": {"canonical_name": "mushroom", "aliases": []},
# harvest seems more useful
"1f33e": {"canonical_name": "harvest", "aliases": ["ear_of_rice"]},
"1f490": {"canonical_name": "bouquet", "aliases": []},
# seems like the best emoji for flower
"1f337": {"canonical_name": "tulip", "aliases": ["flower"]},
"1f339": {"canonical_name": "rose", "aliases": []},
# crushed suggested by a user
"1f940": {"canonical_name": "wilted_flower", "aliases": ["crushed"]},
"1f33b": {"canonical_name": "sunflower", "aliases": []},
"1f33c": {"canonical_name": "blossom", "aliases": []},
"1f338": {"canonical_name": "cherry_blossom", "aliases": []},
"1f33a": {"canonical_name": "hibiscus", "aliases": []},
"1f30e": {"canonical_name": "earth_americas", "aliases": []},
"1f30d": {"canonical_name": "earth_africa", "aliases": []},
"1f30f": {"canonical_name": "earth_asia", "aliases": []},
"1f315": {"canonical_name": "full_moon", "aliases": []},
# too many useless moons. They don't seem to get much use on twitter, and
# they clog up the typeahead for moon.
# '1f316': {'canonical_name': 'X', 'aliases': ['waning_crescent_moon']},
# '1f317': {'canonical_name': 'X', 'aliases': ['last_quarter_moon']},
# '1f318': {'canonical_name': 'X', 'aliases': ['waning_crescent_moon']},
"1f311": {"canonical_name": "new_moon", "aliases": []},
# '1f312': {'canonical_name': 'X', 'aliases': ['waxing_crescent_moon']},
# '1f313': {'canonical_name': 'X', 'aliases': ['first_quarter_moon']},
"1f314": {"canonical_name": "waxing_moon", "aliases": []},
"1f31a": {"canonical_name": "new_moon_face", "aliases": []},
"1f31d": {"canonical_name": "moon_face", "aliases": []},
"1f31e": {"canonical_name": "sun_face", "aliases": []},
# goodnight seems way more useful
"1f31b": {"canonical_name": "goodnight", "aliases": []},
# '1f31c': {'canonical_name': 'X', 'aliases': ['last_quarter_moon_with_face']},
# seems like the best emoji for moon
"1f319": {"canonical_name": "moon", "aliases": []},
# dizzy taken by People/54, had to come up with something else
"1f4ab": {"canonical_name": "seeing_stars", "aliases": []},
"2b50": {"canonical_name": "star", "aliases": []},
# glowing_star from gemoji/Unicode
"1f31f": {"canonical_name": "glowing_star", "aliases": []},
# glamour seems like a reasonable addition
"2728": {"canonical_name": "sparkles", "aliases": ["glamour"]},
# high_voltage from gemoji/Unicode
"26a1": {"canonical_name": "high_voltage", "aliases": ["zap"]},
# https://emojipedia.org/fire/
"1f525": {"canonical_name": "fire", "aliases": ["lit", "hot", "flame"]},
# explosion and crash seem like reasonable additions
"1f4a5": {"canonical_name": "boom", "aliases": ["explosion", "crash", "collision"]},
# meteor seems like a reasonable addition
"2604": {"canonical_name": "comet", "aliases": ["meteor"]},
"2600": {"canonical_name": "sunny", "aliases": []},
"1f324": {"canonical_name": "mostly_sunny", "aliases": []},
# partly_cloudy for the glass half empty people
"26c5": {"canonical_name": "partly_sunny", "aliases": ["partly_cloudy"]},
"1f325": {"canonical_name": "cloudy", "aliases": []},
# sunshowers seems like a more fun term
"1f326": {
"canonical_name": "sunshowers",
"aliases": ["sun_and_rain", "partly_sunny_with_rain"],
},
# pride and lgbtq seem like reasonable additions
"1f308": {"canonical_name": "rainbow", "aliases": ["pride", "lgbtq"]},
# overcast seems like a good addition
"2601": {"canonical_name": "cloud", "aliases": ["overcast"]},
# suggested by a user typing these into their typeahead.
"1f327": {"canonical_name": "rainy", "aliases": ["soaked", "drenched"]},
# thunderstorm seems better for this emoji, and thunder_and_rain more
# evocative than thunder_cloud_and_rain
"26c8": {"canonical_name": "thunderstorm", "aliases": ["thunder_and_rain"]},
# lightning_storm seemed better than lightning_cloud
"1f329": {"canonical_name": "lightning", "aliases": ["lightning_storm"]},
# snowy to parallel sunny, cloudy, etc; snowstorm seems like a good
# addition
"1f328": {"canonical_name": "snowy", "aliases": ["snowstorm"]},
"2603": {"canonical_name": "snowman", "aliases": []},
# don't need two snowmen. frosty is nice because it's a weather term (the
# primary benefit) and also a snowman (one that suffered from not having
# snow, in fact)
"26c4": {"canonical_name": "frosty", "aliases": []},
"2744": {"canonical_name": "snowflake", "aliases": []},
# the internet didn't seem to have a good use for this emoji. windy is a
# useful weather term that is otherwise not represented. mother_nature from
# https://emojipedia.org/wind-blowing-face/
"1f32c": {"canonical_name": "windy", "aliases": ["mother_nature"]},
"1f4a8": {"canonical_name": "dash", "aliases": []},
# tornado_cloud comes from the Unicode, but e.g. gemoji drops the cloud
"1f32a": {"canonical_name": "tornado", "aliases": []},
# hazy seemed like a good addition
"1f32b": {"canonical_name": "fog", "aliases": ["hazy"]},
"1f30a": {"canonical_name": "ocean", "aliases": []},
# drop seems better than droplet, since could be used for its other
# meanings. water drop partly so that it shows up in typeahead for water
"1f4a7": {"canonical_name": "drop", "aliases": ["water_drop"]},
"1f4a6": {"canonical_name": "sweat_drops", "aliases": []},
"2614": {"canonical_name": "umbrella_with_rain", "aliases": []},
"1f34f": {"canonical_name": "green_apple", "aliases": []},
"1f34e": {"canonical_name": "apple", "aliases": []},
"1f350": {"canonical_name": "pear", "aliases": []},
# An argument for not calling this orange is to save the color for a color
# swatch, but we can deal with that when it happens. Mandarin is from
# https://emojipedia.org/tangerine/; I also like that it has a second meaning
"1f34a": {"canonical_name": "orange", "aliases": ["tangerine", "mandarin"]},
"1f34b": {"canonical_name": "lemon", "aliases": []},
"1f34c": {"canonical_name": "banana", "aliases": []},
"1f349": {"canonical_name": "watermelon", "aliases": []},
"1f347": {"canonical_name": "grapes", "aliases": []},
"1f353": {"canonical_name": "strawberry", "aliases": []},
"1f348": {"canonical_name": "melon", "aliases": []},
"1f352": {"canonical_name": "cherries", "aliases": []},
"1f351": {"canonical_name": "peach", "aliases": []},
"1f34d": {"canonical_name": "pineapple", "aliases": []},
"1f95d": {"canonical_name": "kiwi", "aliases": []},
"1f951": {"canonical_name": "avocado", "aliases": []},
"1f345": {"canonical_name": "tomato", "aliases": []},
"1f346": {"canonical_name": "eggplant", "aliases": []},
"1f952": {"canonical_name": "cucumber", "aliases": []},
"1f955": {"canonical_name": "carrot", "aliases": []},
# maize is from Unicode
"1f33d": {"canonical_name": "corn", "aliases": ["maize"]},
# chili_pepper seems like a reasonable addition
"1f336": {"canonical_name": "hot_pepper", "aliases": ["chili_pepper"]},
"1f954": {"canonical_name": "potato", "aliases": []},
# yam seems better than sweet_potato, since we already have a potato (not a
# strong argument, but it is better for the typeahead not to have emoji that
# share long prefixes)
"1f360": {"canonical_name": "yam", "aliases": ["sweet_potato"]},
"1f330": {"canonical_name": "chestnut", "aliases": []},
"1f95c": {"canonical_name": "peanuts", "aliases": []},
"1f36f": {"canonical_name": "honey", "aliases": []},
"1f950": {"canonical_name": "croissant", "aliases": []},
"1f35e": {"canonical_name": "bread", "aliases": []},
"1f956": {"canonical_name": "baguette", "aliases": []},
"1f9c0": {"canonical_name": "cheese", "aliases": []},
"1f95a": {"canonical_name": "egg", "aliases": []},
# already have an egg in Foods/31, though I guess wouldn't be a big deal to
# add it here.
"1f373": {"canonical_name": "cooking", "aliases": []},
"1f953": {"canonical_name": "bacon", "aliases": []},
# there's no lunch and dinner, which is a small negative against adding
# breakfast
"1f95e": {"canonical_name": "pancakes", "aliases": ["breakfast"]},
# There is already shrimp in Nature/51, and tempura seems like a better
# description
"1f364": {"canonical_name": "tempura", "aliases": []},
# drumstick seems like a better description
"1f357": {"canonical_name": "drumstick", "aliases": ["poultry"]},
"1f356": {"canonical_name": "meat", "aliases": []},
"1f355": {"canonical_name": "pizza", "aliases": []},
"1f32d": {"canonical_name": "hotdog", "aliases": []},
"1f354": {"canonical_name": "hamburger", "aliases": []},
"1f35f": {"canonical_name": "fries", "aliases": []},
# https://emojipedia.org/stuffed-flatbread/
"1f959": {
"canonical_name": "doner_kebab",
"aliases": ["shawarma", "souvlaki", "stuffed_flatbread"],
},
"1f32e": {"canonical_name": "taco", "aliases": []},
"1f32f": {"canonical_name": "burrito", "aliases": []},
"1f957": {"canonical_name": "salad", "aliases": []},
# I think Foods/49 is a better :food:
"1f958": {"canonical_name": "paella", "aliases": []},
"1f35d": {"canonical_name": "spaghetti", "aliases": []},
# seems like the best noodles? maybe this should be Foods/47? Noodles seem
# like a bigger thing in East Asia than in Europe, so going with that.
"1f35c": {"canonical_name": "ramen", "aliases": ["noodles"]},
# seems like the best :food:. Also a reasonable :soup:, though the Google
# one is indeed more a pot of food (the Unicode) than a soup
"1f372": {"canonical_name": "food", "aliases": ["soup", "stew"]},
# naruto is the actual name, and I don't think we need this to autocomplete
# for "fish"
"1f365": {"canonical_name": "naruto", "aliases": []},
"1f363": {"canonical_name": "sushi", "aliases": []},
"1f371": {"canonical_name": "bento", "aliases": []},
"1f35b": {"canonical_name": "curry", "aliases": []},
"1f35a": {"canonical_name": "rice", "aliases": []},
# onigiri is the actual name, and I don't think we need this to typeahead
# complete for "rice"
"1f359": {"canonical_name": "onigiri", "aliases": []},
# leaving rice_cracker in, so that we have something for cracker
"1f358": {"canonical_name": "senbei", "aliases": ["rice_cracker"]},
"1f362": {"canonical_name": "oden", "aliases": []},
"1f361": {"canonical_name": "dango", "aliases": []},
"1f367": {"canonical_name": "shaved_ice", "aliases": []},
# seemed like the best emoji for gelato
"1f368": {"canonical_name": "ice_cream", "aliases": ["gelato"]},
# already have ice_cream in Foods/60, and soft_serve seems like a
# potentially fun emoji to have in conjunction with ice_cream. Put in
# soft_ice_cream so it typeahead completes on ice_cream as well.
"1f366": {"canonical_name": "soft_serve", "aliases": ["soft_ice_cream"]},
"1f370": {"canonical_name": "cake", "aliases": []},
"1f382": {"canonical_name": "birthday", "aliases": []},
# flan seems like a reasonable addition
"1f36e": {"canonical_name": "custard", "aliases": ["flan"]},
"1f36d": {"canonical_name": "lollipop", "aliases": []},
"1f36c": {"canonical_name": "candy", "aliases": []},
"1f36b": {"canonical_name": "chocolate", "aliases": []},
"1f37f": {"canonical_name": "popcorn", "aliases": []},
# donut dominates doughnut on
# https://trends.google.com/trends/explore?q=doughnut,donut
"1f369": {"canonical_name": "donut", "aliases": ["doughnut"]},
"1f36a": {"canonical_name": "cookie", "aliases": []},
"1f95b": {"canonical_name": "milk", "aliases": ["glass_of_milk"]},
"1f37c": {"canonical_name": "baby_bottle", "aliases": []},
"2615": {"canonical_name": "coffee", "aliases": []},
"1f375": {"canonical_name": "tea", "aliases": []},
"1f376": {"canonical_name": "sake", "aliases": []},
"1f37a": {"canonical_name": "beer", "aliases": []},
"1f37b": {"canonical_name": "beers", "aliases": []},
"1f942": {"canonical_name": "clink", "aliases": ["toast"]},
"1f377": {"canonical_name": "wine", "aliases": []},
# tumbler means something different in India, and we don't want to use
# shot_glass given our policy of using school-age-appropriate terms
"1f943": {"canonical_name": "small_glass", "aliases": []},
"1f378": {"canonical_name": "cocktail", "aliases": []},
"1f379": {"canonical_name": "tropical_drink", "aliases": []},
"1f37e": {"canonical_name": "champagne", "aliases": []},
"1f944": {"canonical_name": "spoon", "aliases": []},
# Added eating_utensils so this would show up in typeahead for eat.
"1f374": {"canonical_name": "fork_and_knife", "aliases": ["eating_utensils"]},
# Seems like the best emoji for hungry and meal. fork_and_knife_with_plate
# is from gemoji/Unicode, and I think it is better than the shorter iamcal
# version in this case. The rest just seemed like good additions.
"1f37d": {
"canonical_name": "hungry",
"aliases": ["meal", "table_setting", "fork_and_knife_with_plate", "lets_eat"],
},
# most people interested in this sport call it football
"26bd": {"canonical_name": "football", "aliases": ["soccer"]},
"1f3c0": {"canonical_name": "basketball", "aliases": []},
# to distinguish from Activity/1, but is also the Unicode name
"1f3c8": {"canonical_name": "american_football", "aliases": []},
"26be": {"canonical_name": "baseball", "aliases": []},
"1f3be": {"canonical_name": "tennis", "aliases": []},
"1f3d0": {"canonical_name": "volleyball", "aliases": []},
"1f3c9": {"canonical_name": "rugby", "aliases": []},
# https://emojipedia.org/billiards/ suggests this is actually used for
# billiards, not for "unlucky" or "losing" or some other connotation of
# 8ball. The Unicode name is billiards.
"1f3b1": {"canonical_name": "billiards", "aliases": ["pool", "8_ball"]},
# ping pong is the Unicode name, and seems slightly more popular on
# https://trends.google.com/trends/explore?q=table%20tennis,ping%20pong
"1f3d3": {"canonical_name": "ping_pong", "aliases": ["table_tennis"]},
"1f3f8": {"canonical_name": "badminton", "aliases": []},
# gooooooooal seems like a more useful name, though arguably this isn't the
# best emoji for it
"1f945": {"canonical_name": "gooooooooal", "aliases": ["goal"]},
"1f3d2": {"canonical_name": "ice_hockey", "aliases": []},
"1f3d1": {"canonical_name": "field_hockey", "aliases": []},
# would say bat, but taken by Nature/30
"1f3cf": {"canonical_name": "cricket", "aliases": ["cricket_bat"]},
# hole_in_one seems like a more useful name to have. Sent golf to
# Activity/39
"26f3": {"canonical_name": "hole_in_one", "aliases": []},
# archery seems like a reasonable addition
"1f3f9": {"canonical_name": "bow_and_arrow", "aliases": ["archery"]},
"1f3a3": {"canonical_name": "fishing", "aliases": []},
"1f94a": {"canonical_name": "boxing_glove", "aliases": []},
# keikogi and dogi are the actual names for this, I believe. black_belt is,
# I think, a more useful name here
"1f94b": {"canonical_name": "black_belt", "aliases": ["keikogi", "dogi", "martial_arts"]},
"26f8": {"canonical_name": "ice_skate", "aliases": []},
"1f3bf": {"canonical_name": "ski", "aliases": []},
"26f7": {"canonical_name": "skier", "aliases": []},
"1f3c2": {"canonical_name": "snowboarder", "aliases": []},
# lift is both what lifters call it, and potentially can be used more
# generally than weight_lift. The others seemed like good additions.
"1f3cb": {"canonical_name": "lift", "aliases": ["work_out", "weight_lift", "gym"]},
# The decisions on tenses here and in the rest of the sports section are
# mostly from gut feel. The Unicode itself is all over the place.
"1f93a": {"canonical_name": "fencing", "aliases": []},
"1f93c": {"canonical_name": "wrestling", "aliases": []},
# seemed like reasonable additions
"1f938": {"canonical_name": "cartwheel", "aliases": ["acrobatics", "gymnastics", "tumbling"]},
# seemed the best emoji for sports
"26f9": {"canonical_name": "ball", "aliases": ["sports"]},
"1f93e": {"canonical_name": "handball", "aliases": []},
"1f3cc": {"canonical_name": "golf", "aliases": []},
"1f3c4": {"canonical_name": "surf", "aliases": []},
"1f3ca": {"canonical_name": "swim", "aliases": []},
"1f93d": {"canonical_name": "water_polo", "aliases": []},
# rest seem like reasonable additions
"1f6a3": {"canonical_name": "rowboat", "aliases": ["crew", "sculling", "rowing"]},
# horse_riding seems like a reasonable addition
"1f3c7": {"canonical_name": "horse_racing", "aliases": ["horse_riding"]},
# at least in the US: this = cyclist, Activity/53 = mountain biker, and
# motorcyclist = biker. Mainly from googling around and personal
# experience. E.g. https://grammarist.com/usage/cyclist-biker/ for cyclist
# and biker,
# https://www.theguardian.com/lifeandstyle/2010/oct/24/bike-snobs-guide-cycling-tribes
# for mountain biker (I've never heard the term "mountain cyclist", and
# they are the only group on that page that gets "biker" instead of
# "cyclist")
"1f6b4": {"canonical_name": "cyclist", "aliases": []},
# see Activity/51
"1f6b5": {"canonical_name": "mountain_biker", "aliases": []},
"1f3bd": {"canonical_name": "running_shirt", "aliases": []},
# I feel like people call sports medals "medals", and military medals
# "military medals". Also see Activity/56
"1f3c5": {"canonical_name": "medal", "aliases": []},
# See Activity/55. military_medal is the gemoji/Unicode
"1f396": {"canonical_name": "military_medal", "aliases": []},
# gold and number_one seem like good additions
"1f947": {"canonical_name": "first_place", "aliases": ["gold", "number_one"]},
# to parallel Activity/57
"1f948": {"canonical_name": "second_place", "aliases": ["silver"]},
# to parallel Activity/57
"1f949": {"canonical_name": "third_place", "aliases": ["bronze"]},
# seemed the best emoji for winner
"1f3c6": {"canonical_name": "trophy", "aliases": ["winner"]},
"1f3f5": {"canonical_name": "rosette", "aliases": []},
"1f397": {"canonical_name": "reminder_ribbon", "aliases": []},
# don't need ticket and admission_ticket (see Activity/64), so made one of
# them :pass:.
"1f3ab": {"canonical_name": "pass", "aliases": []},
# see Activity/63
"1f39f": {"canonical_name": "ticket", "aliases": []},
"1f3aa": {"canonical_name": "circus", "aliases": []},
"1f939": {"canonical_name": "juggling", "aliases": []},
# rest seem like good additions
"1f3ad": {"canonical_name": "performing_arts", "aliases": ["drama", "theater"]},
# rest seem like good additions
"1f3a8": {"canonical_name": "art", "aliases": ["palette", "painting"]},
# action seems more useful than clapper, and clapper doesn't seem like that
# common of a term
"1f3ac": {"canonical_name": "action", "aliases": []},
# seem like good additions
"1f3a4": {"canonical_name": "microphone", "aliases": ["mike", "mic"]},
"1f3a7": {"canonical_name": "headphones", "aliases": []},
"1f3bc": {"canonical_name": "musical_score", "aliases": []},
# piano seems more useful than musical_keyboard
"1f3b9": {"canonical_name": "piano", "aliases": ["musical_keyboard"]},
"1f941": {"canonical_name": "drum", "aliases": []},
"1f3b7": {"canonical_name": "saxophone", "aliases": []},
"1f3ba": {"canonical_name": "trumpet", "aliases": []},
"1f3b8": {"canonical_name": "guitar", "aliases": []},
"1f3bb": {"canonical_name": "violin", "aliases": []},
# dice seems more useful
"1f3b2": {"canonical_name": "dice", "aliases": ["die"]},
# direct_hit from gemoji/Unicode, and seems more useful. bulls_eye seemed
# like a reasonable addition
"1f3af": {"canonical_name": "direct_hit", "aliases": ["darts", "bulls_eye"]},
# strike seemed more useful than bowling
"1f3b3": {"canonical_name": "strike", "aliases": ["bowling"]},
"1f3ae": {"canonical_name": "video_game", "aliases": []},
# gambling seemed more useful than slot_machine
"1f3b0": {"canonical_name": "slot_machine", "aliases": []},
# the Google emoji for this is not red
"1f697": {"canonical_name": "car", "aliases": []},
# rideshare seems like a reasonable addition
"1f695": {"canonical_name": "taxi", "aliases": ["rideshare"]},
# the Google emoji for this is not blue. recreational_vehicle is from
# gemoji/Unicode, jeep seemed like a good addition
"1f699": {"canonical_name": "recreational_vehicle", "aliases": ["jeep"]},
# school_bus seemed like a reasonable addition, even though the twitter
# glyph for this doesn't really look like a school bus
"1f68c": {"canonical_name": "bus", "aliases": ["school_bus"]},
"1f68e": {"canonical_name": "trolley", "aliases": []},
"1f3ce": {"canonical_name": "racecar", "aliases": []},
"1f693": {"canonical_name": "police_car", "aliases": []},
"1f691": {"canonical_name": "ambulance", "aliases": []},
# https://trends.google.com/trends/explore?q=fire%20truck,fire%20engine
"1f692": {"canonical_name": "fire_truck", "aliases": ["fire_engine"]},
"1f690": {"canonical_name": "minibus", "aliases": []},
# moving_truck and truck for Places/11 and Places/12 seem much better than
# the iamcal names
"1f69a": {"canonical_name": "moving_truck", "aliases": []},
# see Places/11 for truck. Rest seem reasonable additions.
"1f69b": {
"canonical_name": "truck",
"aliases": ["tractor-trailer", "big_rig", "semi_truck", "transport_truck"],
},
"1f69c": {"canonical_name": "tractor", "aliases": []},
# kick_scooter and scooter seem better for Places/14 and Places/16 than
# scooter and motor_scooter.
"1f6f4": {"canonical_name": "kick_scooter", "aliases": []},
"1f6b2": {"canonical_name": "bike", "aliases": ["bicycle"]},
# see Places/14. Called motor_bike (or bike) in India
"1f6f5": {"canonical_name": "scooter", "aliases": ["motor_bike"]},
"1f3cd": {"canonical_name": "motorcycle", "aliases": []},
# siren seems more useful. alert seems like a reasonable addition
"1f6a8": {"canonical_name": "siren", "aliases": ["rotating_light", "alert"]},
"1f694": {"canonical_name": "oncoming_police_car", "aliases": []},
"1f68d": {"canonical_name": "oncoming_bus", "aliases": []},
# car to parallel e.g. Places/1
"1f698": {"canonical_name": "oncoming_car", "aliases": ["oncoming_automobile"]},
"1f696": {"canonical_name": "oncoming_taxi", "aliases": []},
# ski_lift seems like a good addition
"1f6a1": {"canonical_name": "aerial_tramway", "aliases": ["ski_lift"]},
# gondola seems more useful
"1f6a0": {"canonical_name": "gondola", "aliases": ["mountain_cableway"]},
"1f69f": {"canonical_name": "suspension_railway", "aliases": []},
# train_car seems like a reasonable addition
"1f683": {"canonical_name": "railway_car", "aliases": ["train_car"]},
# this does not seem like a good emoji for train, especially compared to
# Places/33. streetcar seems like a good addition.
"1f68b": {"canonical_name": "tram", "aliases": ["streetcar"]},
"1f69e": {"canonical_name": "mountain_railway", "aliases": []},
# elevated_train seems like a reasonable addition
"1f69d": {"canonical_name": "monorail", "aliases": ["elevated_train"]},
# from gemoji/Unicode. Also, I don't think we need two bullettrains
"1f684": {"canonical_name": "high_speed_train", "aliases": []},
# Google, Wikipedia, etc. prefer bullet train to bullettrain
"1f685": {"canonical_name": "bullet_train", "aliases": []},
"1f688": {"canonical_name": "light_rail", "aliases": []},
"1f682": {"canonical_name": "train", "aliases": ["steam_locomotive"]},
# oncoming_train seems better than train2
"1f686": {"canonical_name": "oncoming_train", "aliases": []},
# saving metro for Symbols/108. The tunnel makes subway more appropriate
# anyway.
"1f687": {"canonical_name": "subway", "aliases": []},
# all the glyphs of oncoming vehicles have names like oncoming_*. The
# alternate names are to parallel the alternates to Places/27.
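# (for example, the tram entry above carries the alias streetcar, which is
# why oncoming_streetcar appears among the aliases here)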
"1f68a": {
"canonical_name": "oncoming_tram",
"aliases": ["oncoming_streetcar", "oncoming_trolley"],
},
"1f689": {"canonical_name": "station", "aliases": []},
"1f681": {"canonical_name": "helicopter", "aliases": []},
"1f6e9": {"canonical_name": "small_airplane", "aliases": []},
"2708": {"canonical_name": "airplane", "aliases": []},
# take_off seems more useful than airplane_departure. departure also seems
# more useful than airplane_departure. Arguably departure should be the
# primary, since arrival is probably more useful than landing in Places/42,
# but going with this for now.
"1f6eb": {"canonical_name": "take_off", "aliases": ["departure", "airplane_departure"]},
# parallel to Places/41
"1f6ec": {"canonical_name": "landing", "aliases": ["arrival", "airplane_arrival"]},
"1f680": {"canonical_name": "rocket", "aliases": []},
"1f6f0": {"canonical_name": "satellite", "aliases": []},
"1f4ba": {"canonical_name": "seat", "aliases": []},
"1f6f6": {"canonical_name": "canoe", "aliases": []},
"26f5": {"canonical_name": "boat", "aliases": ["sailboat"]},
"1f6e5": {"canonical_name": "motor_boat", "aliases": []},
"1f6a4": {"canonical_name": "speedboat", "aliases": []},
# yacht and cruise seem like reasonable additions
"1f6f3": {"canonical_name": "passenger_ship", "aliases": ["yacht", "cruise"]},
"26f4": {"canonical_name": "ferry", "aliases": []},
"1f6a2": {"canonical_name": "ship", "aliases": []},
"2693": {"canonical_name": "anchor", "aliases": []},
# there already is a construction in Places/82, and work_in_progress seems
# like a useful thing to have. Construction_zone seems better than the
# Unicode construction_sign, and is there partly so this autocompletes for
# construction.
"1f6a7": {"canonical_name": "work_in_progress", "aliases": ["construction_zone"]},
# alternates from https://emojipedia.org/fuel-pump/. Unicode is fuel_pump,
# not fuelpump
"26fd": {"canonical_name": "fuel_pump", "aliases": ["gas_pump", "petrol_pump"]},
# not sure why iamcal removed the space
"1f68f": {"canonical_name": "bus_stop", "aliases": []},
# https://emojipedia.org/vertical-traffic-light/ thinks this is the more
# common of the two traffic lights, so putting traffic_light on this one
"1f6a6": {"canonical_name": "traffic_light", "aliases": ["vertical_traffic_light"]},
# see Places/57
"1f6a5": {"canonical_name": "horizontal_traffic_light", "aliases": []},
# road_trip from https://mashable.com/2015/10/23/ios-9-1-emoji-guide/
"1f5fa": {"canonical_name": "map", "aliases": ["world_map", "road_trip"]},
# rock_carving, statue, and tower seem more general and less culturally
# specific, for Places/60, 61, and 63.
"1f5ff": {"canonical_name": "rock_carving", "aliases": ["moyai"]},
# new_york from https://emojipedia.org/statue-of-liberty/. see Places/60
# for statue
"1f5fd": {"canonical_name": "statue", "aliases": ["new_york", "statue_of_liberty"]},
"26f2": {"canonical_name": "fountain", "aliases": []},
# see Places/60
"1f5fc": {"canonical_name": "tower", "aliases": ["tokyo_tower"]},
# choosing this as the castle since castles are a way bigger thing in
# Europe than in Japan, and shiro is a pretty reasonable name for Places/65
"1f3f0": {"canonical_name": "castle", "aliases": []},
# see Places/64
"1f3ef": {"canonical_name": "shiro", "aliases": []},
"1f3df": {"canonical_name": "stadium", "aliases": []},
"1f3a1": {"canonical_name": "ferris_wheel", "aliases": []},
"1f3a2": {"canonical_name": "roller_coaster", "aliases": []},
# merry_go_round seems like a good addition
"1f3a0": {"canonical_name": "carousel", "aliases": ["merry_go_round"]},
# beach_umbrella seems more useful
"26f1": {"canonical_name": "beach_umbrella", "aliases": []},
"1f3d6": {"canonical_name": "beach", "aliases": []},
"1f3dd": {"canonical_name": "island", "aliases": []},
"26f0": {"canonical_name": "mountain", "aliases": []},
"1f3d4": {"canonical_name": "snowy_mountain", "aliases": []},
# there are already lots of other mountains; otherwise we would rename this
# like Places/60
"1f5fb": {"canonical_name": "mount_fuji", "aliases": []},
"1f30b": {"canonical_name": "volcano", "aliases": []},
"1f3dc": {"canonical_name": "desert", "aliases": []},
# campsite from https://emojipedia.org/camping/, I think Places/79 is a
# better camping
"1f3d5": {"canonical_name": "campsite", "aliases": []},
"26fa": {"canonical_name": "tent", "aliases": ["camping"]},
"1f6e4": {"canonical_name": "railway_track", "aliases": ["train_tracks"]},
# road is used much more frequently at
# https://trends.google.com/trends/explore?q=road,motorway
"1f6e3": {"canonical_name": "road", "aliases": ["motorway"]},
"1f3d7": {"canonical_name": "construction", "aliases": []},
"1f3ed": {"canonical_name": "factory", "aliases": []},
"1f3e0": {"canonical_name": "house", "aliases": []},
# suburb seems more useful
"1f3e1": {"canonical_name": "suburb", "aliases": []},
"1f3d8": {"canonical_name": "houses", "aliases": []},
# condemned seemed like a good addition
"1f3da": {"canonical_name": "derelict_house", "aliases": ["condemned"]},
"1f3e2": {"canonical_name": "office", "aliases": []},
"1f3ec": {"canonical_name": "department_store", "aliases": []},
"1f3e3": {"canonical_name": "japan_post", "aliases": []},
"1f3e4": {"canonical_name": "post_office", "aliases": []},
"1f3e5": {"canonical_name": "hospital", "aliases": []},
"1f3e6": {"canonical_name": "bank", "aliases": []},
"1f3e8": {"canonical_name": "hotel", "aliases": []},
"1f3ea": {"canonical_name": "convenience_store", "aliases": []},
"1f3eb": {"canonical_name": "school", "aliases": []},
"1f3e9": {"canonical_name": "love_hotel", "aliases": []},
"1f492": {"canonical_name": "wedding", "aliases": []},
"1f3db": {"canonical_name": "classical_building", "aliases": []},
"26ea": {"canonical_name": "church", "aliases": []},
"1f54c": {"canonical_name": "mosque", "aliases": []},
"1f54d": {"canonical_name": "synagogue", "aliases": []},
"1f54b": {"canonical_name": "kaaba", "aliases": []},
"26e9": {"canonical_name": "shinto_shrine", "aliases": []},
"1f5fe": {"canonical_name": "japan", "aliases": []},
# rice_scene seems like a strange name to have. The gemoji alternate is
# moon_ceremony
"1f391": {"canonical_name": "moon_ceremony", "aliases": []},
"1f3de": {"canonical_name": "national_park", "aliases": []},
# ocean_sunrise to parallel Places/109
"1f305": {"canonical_name": "sunrise", "aliases": ["ocean_sunrise"]},
"1f304": {"canonical_name": "mountain_sunrise", "aliases": []},
# shooting_star and wish seem like way better descriptions. gemoji/Unicode
# is shooting_star
"1f320": {"canonical_name": "shooting_star", "aliases": ["wish"]},
"1f387": {"canonical_name": "sparkler", "aliases": []},
"1f386": {"canonical_name": "fireworks", "aliases": []},
"1f307": {"canonical_name": "city_sunrise", "aliases": []},
"1f306": {"canonical_name": "sunset", "aliases": []},
# city and skyline seem more useful than cityscape
"1f3d9": {"canonical_name": "city", "aliases": ["skyline"]},
"1f303": {"canonical_name": "night", "aliases": []},
# night_sky seems like a good addition
"1f30c": {"canonical_name": "milky_way", "aliases": ["night_sky"]},
"1f309": {"canonical_name": "bridge", "aliases": []},
"1f301": {"canonical_name": "foggy", "aliases": []},
"231a": {"canonical_name": "watch", "aliases": []},
# Unicode/gemoji is mobile_phone. The rest seem like good additions
"1f4f1": {"canonical_name": "mobile_phone", "aliases": ["smartphone", "iphone", "android"]},
"1f4f2": {"canonical_name": "calling", "aliases": []},
# gemoji has laptop, even though the Google emoji for this does not look
# like a laptop
"1f4bb": {"canonical_name": "computer", "aliases": ["laptop"]},
"2328": {"canonical_name": "keyboard", "aliases": []},
"1f5a5": {"canonical_name": "desktop_computer", "aliases": []},
"1f5a8": {"canonical_name": "printer", "aliases": []},
# gemoji/Unicode is computer_mouse
"1f5b1": {"canonical_name": "computer_mouse", "aliases": []},
"1f5b2": {"canonical_name": "trackball", "aliases": []},
# arcade seems like a reasonable addition
"1f579": {"canonical_name": "joystick", "aliases": ["arcade"]},
# vise seems like a reasonable addition
"1f5dc": {"canonical_name": "compression", "aliases": ["vise"]},
# gold record seems more useful; the idea came from
# https://11points.com/11-emoji-different-meanings-think/
"1f4bd": {"canonical_name": "gold_record", "aliases": ["minidisc"]},
"1f4be": {"canonical_name": "floppy_disk", "aliases": []},
"1f4bf": {"canonical_name": "cd", "aliases": []},
"1f4c0": {"canonical_name": "dvd", "aliases": []},
# videocassette from gemoji/Unicode
"1f4fc": {"canonical_name": "vhs", "aliases": ["videocassette"]},
"1f4f7": {"canonical_name": "camera", "aliases": []},
# both of these seem more useful than camera_with_flash
"1f4f8": {"canonical_name": "taking_a_picture", "aliases": ["say_cheese"]},
# video_recorder seems like a reasonable addition
"1f4f9": {"canonical_name": "video_camera", "aliases": ["video_recorder"]},
"1f3a5": {"canonical_name": "movie_camera", "aliases": []},
# seems like the best emoji for movie
"1f4fd": {"canonical_name": "projector", "aliases": ["movie"]},
"1f39e": {"canonical_name": "film", "aliases": []},
# both of these seem more useful than telephone_receiver
"1f4de": {"canonical_name": "landline", "aliases": ["home_phone"]},
"260e": {"canonical_name": "phone", "aliases": ["telephone"]},
"1f4df": {"canonical_name": "pager", "aliases": []},
"1f4e0": {"canonical_name": "fax", "aliases": []},
"1f4fa": {"canonical_name": "tv", "aliases": ["television"]},
"1f4fb": {"canonical_name": "radio", "aliases": []},
"1f399": {"canonical_name": "studio_microphone", "aliases": []},
# volume seems more useful
"1f39a": {"canonical_name": "volume", "aliases": ["level_slider"]},
"1f39b": {"canonical_name": "control_knobs", "aliases": []},
"23f1": {"canonical_name": "stopwatch", "aliases": []},
"23f2": {"canonical_name": "timer", "aliases": []},
"23f0": {"canonical_name": "alarm_clock", "aliases": []},
"1f570": {"canonical_name": "mantelpiece_clock", "aliases": []},
# times_up and time_ticking seem more useful than the hourglass names
"231b": {"canonical_name": "times_up", "aliases": ["hourglass_done"]},
# seems like the better hourglass. Also see Objects/36
"23f3": {"canonical_name": "time_ticking", "aliases": ["hourglass"]},
"1f4e1": {"canonical_name": "satellite_antenna", "aliases": []},
# seems like a reasonable addition
"1f50b": {"canonical_name": "battery", "aliases": ["full_battery"]},
"1f50c": {"canonical_name": "electric_plug", "aliases": []},
# light_bulb seems better, and is from Unicode/gemoji. idea seems like a good
# addition
"1f4a1": {"canonical_name": "light_bulb", "aliases": ["bulb", "idea"]},
"1f526": {"canonical_name": "flashlight", "aliases": []},
"1f56f": {"canonical_name": "candle", "aliases": []},
# seems like a reasonable addition
"1f5d1": {"canonical_name": "wastebasket", "aliases": ["trash_can"]},
# https://www.iemoji.com/view/emoji/1173/objects/oil-drum
"1f6e2": {"canonical_name": "oil_drum", "aliases": ["commodities"]},
# losing money from https://emojipedia.org/money-with-wings/,
# easy_come_easy_go seems like a reasonable addition
"1f4b8": {
"canonical_name": "losing_money",
"aliases": ["easy_come_easy_go", "money_with_wings"],
},
# I think the _bills, _banknotes etc versions of these are arguably more
# fun to use in chat, and certainly match the glyphs better
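# (hence dollar_bills, yen_banknotes, euro_banknotes, and pound_notes just
# below, rather than the shorter gemoji-style names dollar, yen, euro, and
# pound)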
"1f4b5": {"canonical_name": "dollar_bills", "aliases": []},
"1f4b4": {"canonical_name": "yen_banknotes", "aliases": []},
"1f4b6": {"canonical_name": "euro_banknotes", "aliases": []},
"1f4b7": {"canonical_name": "pound_notes", "aliases": []},
"1f4b0": {"canonical_name": "money", "aliases": []},
"1f4b3": {"canonical_name": "credit_card", "aliases": ["debit_card"]},
"1f48e": {"canonical_name": "gem", "aliases": ["crystal"]},
# justice seems more useful
"2696": {"canonical_name": "justice", "aliases": ["scales", "balance"]},
# fixing, at_work, and working_on_it seem like useful concepts for
# workplace chat
"1f527": {"canonical_name": "fixing", "aliases": ["wrench"]},
"1f528": {"canonical_name": "hammer", "aliases": ["maintenance", "handyman", "handywoman"]},
"2692": {"canonical_name": "at_work", "aliases": ["hammer_and_pick"]},
# something that might be useful even for chat.zulip.org
"1f6e0": {"canonical_name": "working_on_it", "aliases": ["hammer_and_wrench", "tools"]},
"26cf": {"canonical_name": "mine", "aliases": ["pick"]},
# screw is somewhat inappropriate, but not openly so, so leaving it in
"1f529": {"canonical_name": "nut_and_bolt", "aliases": ["screw"]},
"2699": {"canonical_name": "gear", "aliases": ["settings", "mechanical", "engineer"]},
"26d3": {"canonical_name": "chains", "aliases": []},
"1f52b": {"canonical_name": "gun", "aliases": []},
"1f4a3": {"canonical_name": "bomb", "aliases": []},
# betrayed from https://www.iemoji.com/view/emoji/786/objects/kitchen-knife
"1f52a": {"canonical_name": "knife", "aliases": ["hocho", "betrayed"]},
# rated_for_violence from
# https://www.iemoji.com/view/emoji/1085/objects/dagger. hate (also
# suggested there) seems too strong, as does just "violence".
"1f5e1": {"canonical_name": "dagger", "aliases": ["rated_for_violence"]},
"2694": {"canonical_name": "duel", "aliases": ["swords"]},
"1f6e1": {"canonical_name": "shield", "aliases": []},
"1f6ac": {"canonical_name": "smoking", "aliases": []},
"26b0": {"canonical_name": "coffin", "aliases": ["burial", "grave"]},
"26b1": {"canonical_name": "funeral_urn", "aliases": ["cremation"]},
# amphora is too obscure, I think
"1f3fa": {"canonical_name": "vase", "aliases": ["amphora"]},
"1f52e": {"canonical_name": "crystal_ball", "aliases": ["oracle", "future", "fortune_telling"]},
"1f4ff": {"canonical_name": "prayer_beads", "aliases": []},
"1f488": {"canonical_name": "barber", "aliases": ["striped_pole"]},
# alchemy seems more useful and less obscure
"2697": {"canonical_name": "alchemy", "aliases": ["alembic"]},
"1f52d": {"canonical_name": "telescope", "aliases": []},
# science seems useful to have. scientist inspired by
# https://www.iemoji.com/view/emoji/787/objects/microscope
"1f52c": {"canonical_name": "science", "aliases": ["microscope", "scientist"]},
"1f573": {"canonical_name": "hole", "aliases": []},
"1f48a": {"canonical_name": "medicine", "aliases": ["pill"]},
"1f489": {"canonical_name": "injection", "aliases": ["syringe"]},
"1f321": {"canonical_name": "temperature", "aliases": ["thermometer", "warm"]},
"1f6bd": {"canonical_name": "toilet", "aliases": []},
"1f6b0": {"canonical_name": "potable_water", "aliases": ["tap_water", "drinking_water"]},
"1f6bf": {"canonical_name": "shower", "aliases": []},
"1f6c1": {"canonical_name": "bathtub", "aliases": []},
"1f6c0": {"canonical_name": "bath", "aliases": []},
# reception and services from
# https://www.iemoji.com/view/emoji/1169/objects/bellhop-bell
"1f6ce": {"canonical_name": "bellhop_bell", "aliases": ["reception", "services", "ding"]},
"1f511": {"canonical_name": "key", "aliases": []},
# encrypted from https://www.iemoji.com/view/emoji/1081/objects/old-key,
# secret from https://mashable.com/2015/10/23/ios-9-1-emoji-guide/
"1f5dd": {
"canonical_name": "secret",
"aliases": ["dungeon", "old_key", "encrypted", "clue", "hint"],
},
"1f6aa": {"canonical_name": "door", "aliases": []},
"1f6cb": {
"canonical_name": "living_room",
"aliases": ["furniture", "couch_and_lamp", "lifestyles"],
},
"1f6cf": {"canonical_name": "bed", "aliases": ["bedroom"]},
# guestrooms from iemoji; we would add hotel, but it's taken by Places/94
"1f6cc": {"canonical_name": "in_bed", "aliases": ["accommodations", "guestrooms"]},
"1f5bc": {"canonical_name": "picture", "aliases": ["framed_picture"]},
"1f6cd": {"canonical_name": "shopping_bags", "aliases": []},
# https://trends.google.com/trends/explore?q=shopping%20cart,shopping%20trolley
"1f6d2": {"canonical_name": "shopping_cart", "aliases": ["shopping_trolley"]},
"1f381": {"canonical_name": "gift", "aliases": ["present"]},
# seemed like the best celebration
"1f388": {"canonical_name": "balloon", "aliases": ["celebration"]},
# from gemoji/Unicode
"1f38f": {"canonical_name": "carp_streamer", "aliases": ["flags"]},
"1f380": {"canonical_name": "ribbon", "aliases": ["decoration"]},
"1f38a": {"canonical_name": "confetti", "aliases": ["party_ball"]},
"1f389": {"canonical_name": "tada", "aliases": []},
"1f38e": {"canonical_name": "dolls", "aliases": []},
"1f3ee": {"canonical_name": "lantern", "aliases": ["izakaya_lantern"]},
"1f390": {"canonical_name": "wind_chime", "aliases": []},
"2709": {"canonical_name": "email", "aliases": ["envelope", "mail"]},
# seems useful for chat?
"1f4e9": {"canonical_name": "mail_sent", "aliases": ["sealed"]},
"1f4e8": {"canonical_name": "mail_received", "aliases": []},
"1f4e7": {"canonical_name": "e-mail", "aliases": []},
"1f48c": {"canonical_name": "love_letter", "aliases": []},
"1f4e5": {"canonical_name": "inbox", "aliases": []},
"1f4e4": {"canonical_name": "outbox", "aliases": []},
"1f4e6": {"canonical_name": "package", "aliases": []},
# price_tag from iemoji
"1f3f7": {"canonical_name": "label", "aliases": ["tag", "price_tag"]},
"1f4ea": {"canonical_name": "closed_mailbox", "aliases": []},
"1f4eb": {"canonical_name": "mailbox", "aliases": []},
"1f4ec": {"canonical_name": "unread_mail", "aliases": []},
"1f4ed": {"canonical_name": "inbox_zero", "aliases": ["empty_mailbox", "no_mail"]},
"1f4ee": {"canonical_name": "mail_dropoff", "aliases": []},
"1f4ef": {"canonical_name": "horn", "aliases": []},
"1f4dc": {"canonical_name": "scroll", "aliases": []},
# receipt seems more useful?
"1f4c3": {"canonical_name": "receipt", "aliases": []},
"1f4c4": {"canonical_name": "document", "aliases": ["paper", "file", "page"]},
"1f4d1": {"canonical_name": "place_holder", "aliases": []},
"1f4ca": {"canonical_name": "bar_chart", "aliases": []},
# seems like the best chart
"1f4c8": {"canonical_name": "chart", "aliases": ["upwards_trend", "growing", "increasing"]},
"1f4c9": {"canonical_name": "downwards_trend", "aliases": ["shrinking", "decreasing"]},
"1f5d2": {"canonical_name": "spiral_notepad", "aliases": []},
# '1f5d3': {'canonical_name': 'X', 'aliases': ['spiral_calendar_pad']},
# swapped the following two largely due to the emojione glyphs
"1f4c6": {"canonical_name": "date", "aliases": []},
"1f4c5": {"canonical_name": "calendar", "aliases": []},
"1f4c7": {"canonical_name": "rolodex", "aliases": ["card_index"]},
"1f5c3": {"canonical_name": "archive", "aliases": []},
"1f5f3": {"canonical_name": "ballot_box", "aliases": []},
"1f5c4": {"canonical_name": "file_cabinet", "aliases": []},
"1f4cb": {"canonical_name": "clipboard", "aliases": []},
# don't need two file_folders, so made this organize
"1f4c1": {"canonical_name": "organize", "aliases": ["file_folder"]},
"1f4c2": {"canonical_name": "folder", "aliases": []},
"1f5c2": {"canonical_name": "sort", "aliases": []},
"1f5de": {"canonical_name": "newspaper", "aliases": ["swat"]},
"1f4f0": {"canonical_name": "headlines", "aliases": []},
"1f4d3": {"canonical_name": "notebook", "aliases": ["composition_book"]},
"1f4d4": {"canonical_name": "decorative_notebook", "aliases": []},
"1f4d2": {"canonical_name": "ledger", "aliases": ["spiral_notebook"]},
# the glyphs here are the same as Objects/147-149 (with a different color),
# for all but Google
"1f4d5": {"canonical_name": "red_book", "aliases": ["closed_book"]},
"1f4d7": {"canonical_name": "green_book", "aliases": []},
"1f4d8": {"canonical_name": "blue_book", "aliases": []},
"1f4d9": {"canonical_name": "orange_book", "aliases": []},
"1f4da": {"canonical_name": "books", "aliases": []},
"1f4d6": {"canonical_name": "book", "aliases": ["open_book"]},
"1f516": {"canonical_name": "bookmark", "aliases": []},
"1f517": {"canonical_name": "link", "aliases": []},
"1f4ce": {"canonical_name": "paperclip", "aliases": ["attachment"]},
# office_supplies from https://mashable.com/2015/10/23/ios-9-1-emoji-guide/
"1f587": {"canonical_name": "office_supplies", "aliases": ["paperclip_chain", "linked"]},
"1f4d0": {"canonical_name": "carpenter_square", "aliases": ["triangular_ruler"]},
"1f4cf": {"canonical_name": "ruler", "aliases": ["straightedge"]},
"1f4cc": {"canonical_name": "push_pin", "aliases": ["thumb_tack"]},
"1f4cd": {"canonical_name": "pin", "aliases": ["sewing_pin"]},
"2702": {"canonical_name": "scissors", "aliases": []},
"1f58a": {"canonical_name": "pen", "aliases": ["ballpoint_pen"]},
"1f58b": {"canonical_name": "fountain_pen", "aliases": []},
# three of the four emoji sets just have a rightwards-facing objects/162
# '2712': {'canonical_name': 'X', 'aliases': ['black_nib']},
"1f58c": {"canonical_name": "paintbrush", "aliases": []},
"1f58d": {"canonical_name": "crayon", "aliases": []},
"1f4dd": {"canonical_name": "memo", "aliases": ["note"]},
"270f": {"canonical_name": "pencil", "aliases": []},
"1f50d": {"canonical_name": "search", "aliases": ["find", "magnifying_glass"]},
# '1f50e': {'canonical_name': 'X', 'aliases': ['mag_right']},
# https://emojipedia.org/lock-with-ink-pen/
"1f50f": {
"canonical_name": "privacy",
"aliases": ["key_signing", "digital_security", "protected"],
},
"1f510": {
"canonical_name": "secure",
"aliases": ["lock_with_key", "safe", "commitment", "loyalty"],
},
"1f512": {"canonical_name": "locked", "aliases": []},
"1f513": {"canonical_name": "unlocked", "aliases": []},
# seems the best glyph for love and love_you
"2764": {"canonical_name": "heart", "aliases": ["love", "love_you"]},
"1f49b": {"canonical_name": "yellow_heart", "aliases": ["heart_of_gold"]},
"1f49a": {"canonical_name": "green_heart", "aliases": ["envy"]},
"1f499": {"canonical_name": "blue_heart", "aliases": []},
"1f49c": {"canonical_name": "purple_heart", "aliases": ["bravery"]},
"1f5a4": {"canonical_name": "black_heart", "aliases": []},
"1f494": {"canonical_name": "broken_heart", "aliases": ["heartache"]},
"2763": {"canonical_name": "heart_exclamation", "aliases": []},
"1f495": {"canonical_name": "two_hearts", "aliases": []},
"1f49e": {"canonical_name": "revolving_hearts", "aliases": []},
"1f493": {"canonical_name": "heartbeat", "aliases": []},
"1f497": {"canonical_name": "heart_pulse", "aliases": ["growing_heart"]},
"1f496": {"canonical_name": "sparkling_heart", "aliases": []},
"1f498": {"canonical_name": "cupid", "aliases": ["smitten", "heart_arrow"]},
"1f49d": {"canonical_name": "gift_heart", "aliases": []},
"1f49f": {"canonical_name": "heart_box", "aliases": []},
"262e": {"canonical_name": "peace", "aliases": []},
"271d": {"canonical_name": "cross", "aliases": ["christianity"]},
"262a": {"canonical_name": "star_and_crescent", "aliases": ["islam"]},
"1f549": {"canonical_name": "om", "aliases": ["hinduism"]},
"2638": {"canonical_name": "wheel_of_dharma", "aliases": ["buddhism"]},
"2721": {"canonical_name": "star_of_david", "aliases": ["judaism"]},
# can't find any explanation of this at all. Is an alternate star of david?
# '1f52f': {'canonical_name': 'X', 'aliases': ['six_pointed_star']},
"1f54e": {"canonical_name": "menorah", "aliases": []},
"262f": {"canonical_name": "yin_yang", "aliases": []},
"2626": {"canonical_name": "orthodox_cross", "aliases": []},
"1f6d0": {"canonical_name": "place_of_worship", "aliases": []},
"26ce": {"canonical_name": "ophiuchus", "aliases": []},
"2648": {"canonical_name": "aries", "aliases": []},
"2649": {"canonical_name": "taurus", "aliases": []},
"264a": {"canonical_name": "gemini", "aliases": []},
"264b": {"canonical_name": "cancer", "aliases": []},
"264c": {"canonical_name": "leo", "aliases": []},
"264d": {"canonical_name": "virgo", "aliases": []},
"264e": {"canonical_name": "libra", "aliases": []},
"264f": {"canonical_name": "scorpius", "aliases": []},
"2650": {"canonical_name": "sagittarius", "aliases": []},
"2651": {"canonical_name": "capricorn", "aliases": []},
"2652": {"canonical_name": "aquarius", "aliases": []},
"2653": {"canonical_name": "pisces", "aliases": []},
"1f194": {"canonical_name": "id", "aliases": []},
"269b": {"canonical_name": "atom", "aliases": ["physics"]},
"2622": {"canonical_name": "radioactive", "aliases": ["nuclear"]},
"2623": {"canonical_name": "biohazard", "aliases": []},
"1f4f4": {"canonical_name": "phone_off", "aliases": []},
"1f4f3": {"canonical_name": "vibration_mode", "aliases": []},
    # Japanese symbols; `canonical_name`s taken from emojipedia.org.
"1f236": {"canonical_name": "japanese_not_free_of_charge_button", "aliases": ["u6709"]},
"1f250": {"canonical_name": "japanese_bargain_button", "aliases": ["ideograph_advantage"]},
"1f251": {"canonical_name": "japanese_acceptable_button", "aliases": ["accept"]},
"1f21a": {"canonical_name": "japanese_free_of_charge_button", "aliases": ["u7121"]},
"1f238": {"canonical_name": "japanese_application_button", "aliases": ["u7533"]},
"1f23a": {"canonical_name": "japanese_open_for_business_button", "aliases": ["u55b6"]},
"1f237": {"canonical_name": "japanese_monthly_amount_button", "aliases": ["u6708"]},
"3299": {"canonical_name": "japanese_secret_button", "aliases": ["secret"]},
"3297": {"canonical_name": "japanese_congratulations_button", "aliases": ["congratulations"]},
"1f234": {"canonical_name": "japanese_passing_grade_button", "aliases": ["u5408"]},
"1f235": {"canonical_name": "japanese_no_vacancy_button", "aliases": ["u6e80"]},
"1f239": {"canonical_name": "japanese_discount_button", "aliases": ["u5272"]},
"1f232": {"canonical_name": "japanese_prohibited_button", "aliases": ["u7981"]},
    # End of Japanese symbols.
"2734": {"canonical_name": "eight_pointed_star", "aliases": []},
"1f19a": {"canonical_name": "vs", "aliases": []},
"1f4ae": {"canonical_name": "white_flower", "aliases": []},
"1f170": {"canonical_name": "a", "aliases": []},
"1f171": {"canonical_name": "b", "aliases": []},
"1f18e": {"canonical_name": "ab", "aliases": []},
"1f191": {"canonical_name": "cl", "aliases": []},
"1f17e": {"canonical_name": "o", "aliases": []},
"1f198": {"canonical_name": "sos", "aliases": []},
# Symbols/105 seems like a better x, and looks more like the other letters
"274c": {"canonical_name": "cross_mark", "aliases": ["incorrect", "wrong"]},
"2b55": {"canonical_name": "circle", "aliases": []},
"1f6d1": {"canonical_name": "stop_sign", "aliases": ["octagonal_sign"]},
"26d4": {"canonical_name": "no_entry", "aliases": ["wrong_way"]},
"1f4db": {"canonical_name": "name_badge", "aliases": []},
"1f6ab": {"canonical_name": "prohibited", "aliases": ["not_allowed"]},
"1f4af": {"canonical_name": "100", "aliases": ["hundred"]},
"1f4a2": {"canonical_name": "anger", "aliases": ["bam", "pow"]},
"2668": {"canonical_name": "hot_springs", "aliases": []},
"1f6b7": {"canonical_name": "no_pedestrians", "aliases": []},
"1f6af": {"canonical_name": "do_not_litter", "aliases": []},
"1f6b3": {"canonical_name": "no_bicycles", "aliases": []},
"1f6b1": {"canonical_name": "non-potable_water", "aliases": []},
"1f51e": {"canonical_name": "underage", "aliases": ["nc17"]},
"1f4f5": {"canonical_name": "no_phones", "aliases": []},
"1f6ad": {"canonical_name": "no_smoking", "aliases": []},
"2757": {"canonical_name": "exclamation", "aliases": []},
"2755": {"canonical_name": "grey_exclamation", "aliases": []},
"2753": {"canonical_name": "question", "aliases": []},
"2754": {"canonical_name": "grey_question", "aliases": []},
"203c": {"canonical_name": "bangbang", "aliases": ["double_exclamation"]},
"2049": {"canonical_name": "interrobang", "aliases": []},
"1f505": {"canonical_name": "low_brightness", "aliases": ["dim"]},
"1f506": {"canonical_name": "brightness", "aliases": ["high_brightness"]},
"303d": {"canonical_name": "part_alternation", "aliases": []},
"26a0": {"canonical_name": "warning", "aliases": ["caution", "danger"]},
"1f6b8": {
"canonical_name": "children_crossing",
"aliases": ["school_crossing", "drive_with_care"],
},
"1f531": {"canonical_name": "trident", "aliases": []},
"269c": {"canonical_name": "fleur_de_lis", "aliases": []},
"1f530": {"canonical_name": "beginner", "aliases": []},
"267b": {"canonical_name": "recycle", "aliases": []},
# seems like the best check
"2705": {"canonical_name": "check", "aliases": ["all_good", "approved"]},
# '1f22f': {'canonical_name': 'X', 'aliases': ['u6307']},
# stock_market seemed more useful
"1f4b9": {"canonical_name": "stock_market", "aliases": []},
"2747": {"canonical_name": "sparkle", "aliases": []},
"2733": {"canonical_name": "eight_spoked_asterisk", "aliases": []},
"274e": {"canonical_name": "x", "aliases": []},
"1f310": {"canonical_name": "www", "aliases": ["globe"]},
"1f4a0": {"canonical_name": "cute", "aliases": ["kawaii", "diamond_with_a_dot"]},
"24c2": {"canonical_name": "metro", "aliases": ["m"]},
"1f300": {"canonical_name": "cyclone", "aliases": ["hurricane", "typhoon"]},
"1f4a4": {"canonical_name": "zzz", "aliases": []},
"1f3e7": {"canonical_name": "atm", "aliases": []},
"1f6be": {"canonical_name": "wc", "aliases": ["water_closet"]},
"267f": {"canonical_name": "accessible", "aliases": ["wheelchair", "disabled"]},
"1f17f": {"canonical_name": "parking", "aliases": ["p"]},
# '1f233': {'canonical_name': 'X', 'aliases': ['u7a7a']},
# '1f202': {'canonical_name': 'X', 'aliases': ['sa']},
"1f6c2": {"canonical_name": "passport_control", "aliases": ["immigration"]},
"1f6c3": {"canonical_name": "customs", "aliases": []},
"1f6c4": {"canonical_name": "baggage_claim", "aliases": []},
"1f6c5": {"canonical_name": "locker", "aliases": ["locked_bag"]},
"1f6b9": {"canonical_name": "mens", "aliases": []},
"1f6ba": {"canonical_name": "womens", "aliases": []},
# seems more in line with the surrounding bathroom symbols
"1f6bc": {"canonical_name": "baby_change_station", "aliases": ["nursery"]},
"1f6bb": {"canonical_name": "restroom", "aliases": []},
"1f6ae": {"canonical_name": "put_litter_in_its_place", "aliases": []},
"1f3a6": {"canonical_name": "cinema", "aliases": ["movie_theater"]},
"1f4f6": {"canonical_name": "cell_reception", "aliases": ["signal_strength", "signal_bars"]},
# '1f201': {'canonical_name': 'X', 'aliases': ['koko']},
"1f523": {"canonical_name": "symbols", "aliases": []},
"2139": {"canonical_name": "info", "aliases": []},
"1f524": {"canonical_name": "abc", "aliases": []},
"1f521": {"canonical_name": "abcd", "aliases": ["alphabet"]},
"1f520": {"canonical_name": "capital_abcd", "aliases": ["capital_letters"]},
"1f196": {"canonical_name": "ng", "aliases": []},
# from Unicode/gemoji. Saving ok for People/111
"1f197": {"canonical_name": "squared_ok", "aliases": []},
# from Unicode, and to parallel Symbols/135. Saving up for Symbols/171
"1f199": {"canonical_name": "squared_up", "aliases": []},
"1f192": {"canonical_name": "cool", "aliases": []},
"1f195": {"canonical_name": "new", "aliases": []},
"1f193": {"canonical_name": "free", "aliases": []},
"0030-20e3": {"canonical_name": "zero", "aliases": []},
"0031-20e3": {"canonical_name": "one", "aliases": []},
"0032-20e3": {"canonical_name": "two", "aliases": []},
"0033-20e3": {"canonical_name": "three", "aliases": []},
"0034-20e3": {"canonical_name": "four", "aliases": []},
"0035-20e3": {"canonical_name": "five", "aliases": []},
"0036-20e3": {"canonical_name": "six", "aliases": []},
"0037-20e3": {"canonical_name": "seven", "aliases": []},
"0038-20e3": {"canonical_name": "eight", "aliases": []},
"0039-20e3": {"canonical_name": "nine", "aliases": []},
"1f51f": {"canonical_name": "ten", "aliases": []},
"1f522": {"canonical_name": "1234", "aliases": ["numbers"]},
"0023-20e3": {"canonical_name": "hash", "aliases": []},
"002a-20e3": {"canonical_name": "asterisk", "aliases": []},
"25b6": {"canonical_name": "play", "aliases": []},
"23f8": {"canonical_name": "pause", "aliases": []},
"23ef": {"canonical_name": "play_pause", "aliases": []},
# stop taken by People/118
"23f9": {"canonical_name": "stop_button", "aliases": []},
"23fa": {"canonical_name": "record", "aliases": []},
"23ed": {"canonical_name": "next_track", "aliases": ["skip_forward"]},
"23ee": {"canonical_name": "previous_track", "aliases": ["skip_back"]},
"23e9": {"canonical_name": "fast_forward", "aliases": []},
"23ea": {"canonical_name": "rewind", "aliases": ["fast_reverse"]},
"23eb": {"canonical_name": "double_up", "aliases": ["fast_up"]},
"23ec": {"canonical_name": "double_down", "aliases": ["fast_down"]},
"25c0": {"canonical_name": "play_reverse", "aliases": []},
"1f53c": {"canonical_name": "upvote", "aliases": ["up_button", "increase"]},
"1f53d": {"canonical_name": "downvote", "aliases": ["down_button", "decrease"]},
"27a1": {"canonical_name": "right", "aliases": ["east"]},
"2b05": {"canonical_name": "left", "aliases": ["west"]},
"2b06": {"canonical_name": "up", "aliases": ["north"]},
"2b07": {"canonical_name": "down", "aliases": ["south"]},
"2197": {"canonical_name": "upper_right", "aliases": ["north_east"]},
"2198": {"canonical_name": "lower_right", "aliases": ["south_east"]},
"2199": {"canonical_name": "lower_left", "aliases": ["south_west"]},
"2196": {"canonical_name": "upper_left", "aliases": ["north_west"]},
"2195": {"canonical_name": "up_down", "aliases": []},
"2194": {"canonical_name": "left_right", "aliases": ["swap"]},
"21aa": {"canonical_name": "forward", "aliases": ["right_hook"]},
"21a9": {"canonical_name": "reply", "aliases": ["left_hook"]},
"2934": {"canonical_name": "heading_up", "aliases": []},
"2935": {"canonical_name": "heading_down", "aliases": []},
"1f500": {"canonical_name": "shuffle", "aliases": []},
"1f501": {"canonical_name": "repeat", "aliases": []},
"1f502": {"canonical_name": "repeat_one", "aliases": []},
"1f504": {"canonical_name": "counterclockwise", "aliases": ["return"]},
"1f503": {"canonical_name": "clockwise", "aliases": []},
"1f3b5": {"canonical_name": "music", "aliases": []},
"1f3b6": {"canonical_name": "musical_notes", "aliases": []},
"2795": {"canonical_name": "plus", "aliases": ["add"]},
"2796": {"canonical_name": "minus", "aliases": ["subtract"]},
"2797": {"canonical_name": "division", "aliases": ["divide"]},
"2716": {"canonical_name": "multiplication", "aliases": ["multiply"]},
"1f4b2": {"canonical_name": "dollars", "aliases": []},
# There is no other exchange, so might as well generalize this
"1f4b1": {"canonical_name": "exchange", "aliases": []},
"2122": {"canonical_name": "tm", "aliases": ["trademark"]},
"3030": {"canonical_name": "wavy_dash", "aliases": []},
"27b0": {"canonical_name": "loop", "aliases": []},
# https://emojipedia.org/double-curly-loop/
"27bf": {"canonical_name": "double_loop", "aliases": ["voicemail"]},
"1f51a": {"canonical_name": "end", "aliases": []},
"1f519": {"canonical_name": "back", "aliases": []},
"1f51b": {"canonical_name": "on", "aliases": []},
"1f51d": {"canonical_name": "top", "aliases": []},
"1f51c": {"canonical_name": "soon", "aliases": []},
"2714": {"canonical_name": "check_mark", "aliases": []},
"2611": {"canonical_name": "checkbox", "aliases": []},
"1f518": {"canonical_name": "radio_button", "aliases": []},
"26aa": {"canonical_name": "white_circle", "aliases": []},
"26ab": {"canonical_name": "black_circle", "aliases": []},
"1f534": {"canonical_name": "red_circle", "aliases": []},
"1f535": {"canonical_name": "blue_circle", "aliases": []},
"1f53a": {"canonical_name": "red_triangle_up", "aliases": []},
"1f53b": {"canonical_name": "red_triangle_down", "aliases": []},
"1f538": {"canonical_name": "small_orange_diamond", "aliases": []},
"1f539": {"canonical_name": "small_blue_diamond", "aliases": []},
"1f536": {"canonical_name": "large_orange_diamond", "aliases": []},
"1f537": {"canonical_name": "large_blue_diamond", "aliases": []},
"1f533": {"canonical_name": "black_and_white_square", "aliases": []},
"1f532": {"canonical_name": "white_and_black_square", "aliases": []},
"25aa": {"canonical_name": "black_small_square", "aliases": []},
"25ab": {"canonical_name": "white_small_square", "aliases": []},
"25fe": {"canonical_name": "black_medium_small_square", "aliases": []},
"25fd": {"canonical_name": "white_medium_small_square", "aliases": []},
"25fc": {"canonical_name": "black_medium_square", "aliases": []},
"25fb": {"canonical_name": "white_medium_square", "aliases": []},
"2b1b": {"canonical_name": "black_large_square", "aliases": []},
"2b1c": {"canonical_name": "white_large_square", "aliases": []},
"1f7e8": {"canonical_name": "yellow_large_square", "aliases": []},
"1f7e9": {"canonical_name": "green_large_square", "aliases": []},
"1f508": {"canonical_name": "speaker", "aliases": []},
"1f507": {"canonical_name": "mute", "aliases": ["no_sound"]},
"1f509": {"canonical_name": "softer", "aliases": []},
"1f50a": {"canonical_name": "louder", "aliases": ["sound"]},
"1f514": {"canonical_name": "notifications", "aliases": ["bell"]},
"1f515": {"canonical_name": "mute_notifications", "aliases": []},
"1f4e3": {"canonical_name": "megaphone", "aliases": ["shout"]},
"1f4e2": {"canonical_name": "loudspeaker", "aliases": ["bullhorn"]},
"1f4ac": {"canonical_name": "umm", "aliases": ["speech_balloon"]},
"1f5e8": {"canonical_name": "speech_bubble", "aliases": []},
"1f4ad": {"canonical_name": "thought", "aliases": ["dream"]},
"1f5ef": {"canonical_name": "anger_bubble", "aliases": []},
"2660": {"canonical_name": "spades", "aliases": []},
"2663": {"canonical_name": "clubs", "aliases": []},
"2665": {"canonical_name": "hearts", "aliases": []},
"2666": {"canonical_name": "diamonds", "aliases": []},
"1f0cf": {"canonical_name": "joker", "aliases": []},
"1f3b4": {"canonical_name": "playing_cards", "aliases": []},
"1f004": {"canonical_name": "mahjong", "aliases": []},
# The only use I can think of for so many clocks is to be able to use them
# to vote on times and such in emoji reactions. But a) the experience is
# not that great (the images are too small), b) there are issues with
# 24-hour time (used in many countries), like what is 00:30 or 01:00
# called, c) it's hard to make the compose typeahead experience great, and
# d) we should have a dedicated time voting widget that takes care of
# time zone and locale issues, and uses a digital representation.
# '1f550': {'canonical_name': 'X', 'aliases': ['clock1']},
# '1f551': {'canonical_name': 'X', 'aliases': ['clock2']},
# '1f552': {'canonical_name': 'X', 'aliases': ['clock3']},
# '1f553': {'canonical_name': 'X', 'aliases': ['clock4']},
# '1f554': {'canonical_name': 'X', 'aliases': ['clock5']},
# '1f555': {'canonical_name': 'X', 'aliases': ['clock6']},
# '1f556': {'canonical_name': 'X', 'aliases': ['clock7']},
# seems like the best choice for time
"1f557": {"canonical_name": "time", "aliases": ["clock"]},
# '1f558': {'canonical_name': 'X', 'aliases': ['clock9']},
# '1f559': {'canonical_name': 'X', 'aliases': ['clock10']},
# '1f55a': {'canonical_name': 'X', 'aliases': ['clock11']},
# '1f55b': {'canonical_name': 'X', 'aliases': ['clock12']},
# '1f55c': {'canonical_name': 'X', 'aliases': ['clock130']},
# '1f55d': {'canonical_name': 'X', 'aliases': ['clock230']},
# '1f55e': {'canonical_name': 'X', 'aliases': ['clock330']},
# '1f55f': {'canonical_name': 'X', 'aliases': ['clock430']},
# '1f560': {'canonical_name': 'X', 'aliases': ['clock530']},
# '1f561': {'canonical_name': 'X', 'aliases': ['clock630']},
# '1f562': {'canonical_name': 'X', 'aliases': ['clock730']},
# '1f563': {'canonical_name': 'X', 'aliases': ['clock830']},
# '1f564': {'canonical_name': 'X', 'aliases': ['clock930']},
# '1f565': {'canonical_name': 'X', 'aliases': ['clock1030']},
# '1f566': {'canonical_name': 'X', 'aliases': ['clock1130']},
# '1f567': {'canonical_name': 'X', 'aliases': ['clock1230']},
"1f3f3": {"canonical_name": "white_flag", "aliases": ["surrender"]},
"1f3f4": {"canonical_name": "black_flag", "aliases": []},
"1f3c1": {"canonical_name": "checkered_flag", "aliases": ["race", "go", "start"]},
"1f6a9": {"canonical_name": "triangular_flag", "aliases": []},
# solidarity from iemoji
"1f38c": {"canonical_name": "crossed_flags", "aliases": ["solidarity"]},
}
|
zulip/zulip
|
tools/setup/emoji/emoji_names.py
|
Python
|
apache-2.0
| 96,073
|
[
"CRYSTAL",
"Octopus"
] |
46564e87ebd8ef7bdc4a105e57fe3c854dd1d7c80bdf89dd53254767405254ea
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for jax_dft.scf."""
import functools
import os
import shutil
import tempfile
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import random
from jax import tree_util
from jax.config import config
from jax.experimental import stax
import jax.numpy as jnp
import numpy as np
from scipy import optimize
from jax_dft import neural_xc
from jax_dft import np_utils
from jax_dft import scf
from jax_dft import utils
# Set the default dtype as float64
config.update('jax_enable_x64', True)
class ScfTest(parameterized.TestCase):
def test_discrete_laplacian(self):
np.testing.assert_allclose(
scf.discrete_laplacian(6),
[
[-5. / 2, 4. / 3, -1. / 12, 0., 0., 0.],
[4. / 3, -5. / 2, 4. / 3, -1. / 12, 0., 0.],
[-1. / 12, 4. / 3, -5. / 2, 4. / 3, -1. / 12, 0.],
[0., -1. / 12, 4. / 3, -5. / 2, 4. / 3, -1. / 12],
[0., 0., -1. / 12, 4. / 3, -5. / 2, 4. / 3],
[0., 0., 0., -1. / 12, 4. / 3, -5. / 2],
],
atol=1e-6)
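  # A worked check of the expected values in the next test, assuming
  # get_kinetic_matrix returns -0.5 * discrete_laplacian / dx ** 2:
  # jnp.linspace(-10, 10, 6) has dx = 4, so the diagonal is
  # -0.5 * (-5. / 2) / 16 = 0.078125, the first off-diagonal is
  # -0.5 * (4. / 3) / 16 = -0.04166667, and the second off-diagonal is
  # -0.5 * (-1. / 12) / 16 = 0.00260417.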
def test_get_kinetic_matrix(self):
np.testing.assert_allclose(
scf.get_kinetic_matrix(grids=jnp.linspace(-10, 10, 6)),
[
[0.078125, -0.04166667, 0.00260417, 0., 0., 0.],
[-0.04166667, 0.078125, -0.04166667, 0.00260417, 0., 0.],
[0.00260417, -0.04166667, 0.078125, -0.04166667, 0.00260417, 0.],
[0., 0.00260417, -0.04166667, 0.078125, -0.04166667, 0.00260417],
[0., 0., 0.00260417, -0.04166667, 0.078125, -0.04166667],
[0., 0., 0., 0.00260417, -0.04166667, 0.078125],
],
atol=1e-6)
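  # Note on the expected densities below, assuming wavefunctions_to_density
  # doubly occupies each spatial orbital in order of increasing energy:
  # with single-orbital intensities [0, 0, 10, 0, 0] and [0, 5, 0, 5, 0],
  # e.g. num_electrons=3 puts two electrons in orbital 0 and one in
  # orbital 1, giving [0., 5., 20., 5., 0.].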
@parameterized.parameters(
# The normalized wavefunctions are
# [[0., 0., 1. / sqrt(0.1), 0., 0.],
# [0., -1. / sqrt(0.2), 0., 1. / sqrt(0.2), 0.]]
# Intensities
# [[0., 0., 10., 0., 0.],
# [0., 5., 0., 5., 0.]]
(1, np.array([0., 0., 10., 0., 0.])),
(2, np.array([0., 0., 20., 0., 0.])),
(3, np.array([0., 5., 20., 5., 0.])),
(4, np.array([0., 10., 20., 10., 0.])),
)
def test_wavefunctions_to_density(self, num_electrons, expected_density):
np.testing.assert_allclose(
scf.wavefunctions_to_density(
num_electrons=num_electrons,
wavefunctions=jnp.array([
[0., 0., 1., 0., 0.],
[0., -1., 0., 1., 0.],
]),
grids=jnp.arange(5) * 0.1),
expected_density)
@parameterized.parameters(
(1, -1.), # total_eigen_energies = -1.
(2, -2.), # total_eigen_energies = -1. - 1.
(3, 0.), # total_eigen_energies = -1. - 1. + 2.
(4, 2.), # total_eigen_energies = -1. - 1. + 2. + 2.
(5, 7.), # total_eigen_energies = -1. - 1. + 2. + 2. + 5.
(6, 12.), # total_eigen_energies = -1. - 1. + 2. + 2. + 5. + 5.
)
def test_get_total_eigen_energies(
self, num_electrons, expected_total_eigen_energies):
self.assertAlmostEqual(
scf.get_total_eigen_energies(
num_electrons=num_electrons,
eigen_energies=jnp.array([-1., 2., 5.])),
expected_total_eigen_energies)
@parameterized.parameters(
(1, 0.), # gap = -1. - (-1.)
(2, 3.), # gap = 2. - (-1.)
(3, 0.), # gap = 2. - 2.
(4, 7.), # gap = 9. - 2.
(5, 0.), # gap = 9. - 9.
(6, 78.), # gap = 87. - 9.
)
def test_get_gap(self, num_electrons, expected_gap):
self.assertAlmostEqual(
scf.get_gap(
num_electrons=num_electrons,
eigen_energies=jnp.array([-1., 2., 9., 87.])),
expected_gap)
@parameterized.parameters(
(1, 0.5, 0.), # total_eigen_energies = 0.5
(2, 1., 1.), # total_eigen_energies = 0.5 + 0.5
(3, 2.5, 0.), # total_eigen_energies = 0.5 + 0.5 + 1.5
(4, 4., 1.), # total_eigen_energies = 0.5 + 0.5 + 1.5 + 1.5
)
def test_solve_noninteracting_system(
self, num_electrons, expected_total_eigen_energies, expected_gap):
# Quantum harmonic oscillator.
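    # For V(x) = 0.5 * x ** 2 in atomic units the exact spectrum is
    # E_n = n + 1 / 2, so double occupation gives the expected totals above
    # (e.g. 4 electrons: 2 * 0.5 + 2 * 1.5 = 4.), and the gap alternates
    # between 0 (within a half-filled orbital) and 1 (between shells).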
grids = jnp.linspace(-10, 10, 1001)
density, total_eigen_energies, gap = scf.solve_noninteracting_system(
external_potential=0.5 * grids ** 2,
num_electrons=num_electrons,
grids=grids)
self.assertTupleEqual(density.shape, (1001,))
self.assertAlmostEqual(
float(total_eigen_energies), expected_total_eigen_energies, places=7)
self.assertAlmostEqual(float(gap), expected_gap, places=7)
@parameterized.parameters(utils.soft_coulomb, utils.exponential_coulomb)
def test_get_hartree_energy(self, interaction_fn):
grids = jnp.linspace(-5, 5, 11)
dx = utils.get_dx(grids)
density = utils.gaussian(grids=grids, center=1., sigma=1.)
# Compute the expected Hartree energy by nested for loops.
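    # i.e. the discretized double integral
    # E_H = 0.5 * sum_{x0, x1} n(x0) * n(x1) * v(x0 - x1) * dx ** 2.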
expected_hartree_energy = 0.
for x_0, n_0 in zip(grids, density):
for x_1, n_1 in zip(grids, density):
expected_hartree_energy += 0.5 * n_0 * n_1 * interaction_fn(
x_0 - x_1) * dx ** 2
self.assertAlmostEqual(
float(scf.get_hartree_energy(
density=density, grids=grids, interaction_fn=interaction_fn)),
float(expected_hartree_energy))
@parameterized.parameters(utils.soft_coulomb, utils.exponential_coulomb)
def test_get_hartree_potential(self, interaction_fn):
grids = jnp.linspace(-5, 5, 11)
dx = utils.get_dx(grids)
density = utils.gaussian(grids=grids, center=1., sigma=1.)
    # Compute the expected Hartree potential by nested for loops.
expected_hartree_potential = np.zeros_like(grids)
for i, x_0 in enumerate(grids):
for x_1, n_1 in zip(grids, density):
expected_hartree_potential[i] += np.sum(
n_1 * interaction_fn(x_0 - x_1)) * dx
np.testing.assert_allclose(
scf.get_hartree_potential(
density=density, grids=grids, interaction_fn=interaction_fn),
expected_hartree_potential)
def test_get_external_potential_energy(self):
grids = jnp.linspace(-5, 5, 10001)
self.assertAlmostEqual(
float(scf.get_external_potential_energy(
external_potential=-jnp.exp(-grids ** 2),
density=jnp.exp(-(grids - 1) ** 2),
grids=grids)),
# Analytical solution:
# integrate(-exp(-x^2) * exp(-(x - 1) ^ 2), {x, -inf, inf})
# = -sqrt(pi / (2 * e))
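        # (via the Gaussian product rule: exp(-x^2) * exp(-(x - 1)^2)
        # = exp(-1/2) * exp(-2 * (x - 1/2)^2), which integrates to
        # exp(-1/2) * sqrt(pi / 2) = sqrt(pi / (2 * e)))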
-np.sqrt(np.pi / (2 * np.e)))
def test_get_xc_energy(self):
grids = jnp.linspace(-5, 5, 10001)
# We use the form of 3d LDA exchange functional as an example. So the
# correlation contribution is 0.
# exchange energy = -0.73855 \int n^(4 / 3) dx
# exchange energy density = -0.73855 n^(1 / 3)
    # Compute the exchange energy on density exp(-(x - 1) ^ 2):
# -0.73855 * integrate(exp(-(x - 1) ^ 2) ^ (4 / 3), {x, -inf, inf})
# = -1.13367
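    # since exp(-(x - 1)^2)^(4/3) = exp(-(4/3)(x - 1)^2) integrates to
    # sqrt(3 * pi / 4) ~= 1.53499, and -0.73855 * 1.53499 ~= -1.13367.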
xc_energy_density_fn = lambda density: -0.73855 * density ** (1 / 3)
density = jnp.exp(-(grids - 1) ** 2)
self.assertAlmostEqual(
float(scf.get_xc_energy(
density=density,
xc_energy_density_fn=xc_energy_density_fn,
grids=grids)),
-1.13367,
places=5)
def test_get_xc_potential(self):
grids = jnp.linspace(-5, 5, 10001)
# We use the form of 3d LDA exchange functional as an example. So the
# correlation contribution is 0.
# exchange energy = -0.73855 \int n^(4 / 3) dx
# exchange potential should be -0.73855 * (4 / 3) n^(1 / 3)
# by taking functional derivative on exchange energy.
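    # (for a local functional, d/dn [n^(4 / 3)] = (4 / 3) * n^(1 / 3)).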
xc_energy_density_fn = lambda density: -0.73855 * density ** (1 / 3)
density = jnp.exp(-(grids - 1) ** 2)
np.testing.assert_allclose(
scf.get_xc_potential(
density,
xc_energy_density_fn=xc_energy_density_fn,
grids=grids),
-0.73855 * (4 / 3) * density ** (1 / 3))
def test_get_xc_potential_hartree(self):
grids = jnp.linspace(-5, 5, 10001)
density = utils.gaussian(grids=grids, center=1., sigma=1.)
def half_hartree_potential(density):
return 0.5 * scf.get_hartree_potential(
density=density,
grids=grids,
interaction_fn=utils.exponential_coulomb)
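    # The energy 0.5 * integral(n * v_H[n]) is the Hartree energy, whose
    # functional derivative is the full v_H (the energy is quadratic in n),
    # so using half the Hartree potential as a mock xc energy density should
    # make get_xc_potential reproduce the Hartree potential exactly.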
np.testing.assert_allclose(
scf.get_xc_potential(
density=density,
xc_energy_density_fn=half_hartree_potential,
grids=grids),
scf.get_hartree_potential(
density, grids=grids, interaction_fn=utils.exponential_coulomb))
class KohnShamStateTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_dir)
super().tearDown()
def test_save_and_load_state(self):
# Make up a random KohnShamState.
state = scf.KohnShamState(
density=np.random.random((5, 100)),
total_energy=np.random.random(5,),
locations=np.random.random((5, 2)),
nuclear_charges=np.random.random((5, 2)),
external_potential=np.random.random((5, 100)),
grids=np.random.random((5, 100)),
num_electrons=np.random.randint(10, size=5),
hartree_potential=np.random.random((5, 100)))
save_dir = os.path.join(self.test_dir, 'test_state')
scf.save_state(save_dir, state)
loaded_state = scf.load_state(save_dir)
# Check fields.
self.assertEqual(state._fields, loaded_state._fields)
# Check values.
for field in state._fields:
value = getattr(state, field)
if value is None:
self.assertIsNone(getattr(loaded_state, field))
else:
np.testing.assert_allclose(value, getattr(loaded_state, field))
class KohnShamIterationTest(parameterized.TestCase):
def setUp(self):
super(KohnShamIterationTest, self).setUp()
self.grids = jnp.linspace(-5, 5, 101)
self.num_electrons = 2
def _create_testing_initial_state(self, interaction_fn):
locations = jnp.array([-0.5, 0.5])
nuclear_charges = jnp.array([1, 1])
return scf.KohnShamState(
density=self.num_electrons * utils.gaussian(
grids=self.grids, center=0., sigma=1.),
# Set initial energy as inf, the actual value is not used in Kohn-Sham
# calculation.
total_energy=jnp.inf,
locations=locations,
nuclear_charges=nuclear_charges,
external_potential=utils.get_atomic_chain_potential(
grids=self.grids,
locations=locations,
nuclear_charges=nuclear_charges,
interaction_fn=interaction_fn),
grids=self.grids,
num_electrons=self.num_electrons)
def _test_state(self, state, initial_state):
# The density in the next state should normalize to number of electrons.
self.assertAlmostEqual(
float(jnp.sum(state.density) * utils.get_dx(self.grids)),
self.num_electrons)
# The total energy should be finite after one iteration.
self.assertTrue(jnp.isfinite(state.total_energy))
self.assertLen(state.hartree_potential, len(state.grids))
self.assertLen(state.xc_potential, len(state.grids))
# locations, nuclear_charges, external_potential, grids and num_electrons
# remain unchanged.
np.testing.assert_allclose(initial_state.locations, state.locations)
np.testing.assert_allclose(
initial_state.nuclear_charges, state.nuclear_charges)
np.testing.assert_allclose(
initial_state.external_potential, state.external_potential)
np.testing.assert_allclose(initial_state.grids, state.grids)
self.assertEqual(initial_state.num_electrons, state.num_electrons)
self.assertGreater(state.gap, 0)
@parameterized.parameters(
(utils.soft_coulomb, True),
(utils.soft_coulomb, False),
(utils.exponential_coulomb, True),
(utils.exponential_coulomb, False),
)
def test_kohn_sham_iteration(
self, interaction_fn, enforce_reflection_symmetry):
initial_state = self._create_testing_initial_state(interaction_fn)
next_state = scf.kohn_sham_iteration(
state=initial_state,
num_electrons=self.num_electrons,
# Use 3d LDA exchange functional and zero correlation functional.
xc_energy_density_fn=tree_util.Partial(
lambda density: -0.73855 * density ** (1 / 3)),
interaction_fn=interaction_fn,
enforce_reflection_symmetry=enforce_reflection_symmetry)
self._test_state(next_state, initial_state)
@parameterized.parameters(
(utils.soft_coulomb, True),
(utils.soft_coulomb, False),
(utils.exponential_coulomb, True),
(utils.exponential_coulomb, False),
)
def test_kohn_sham_iteration_neural_xc(
self, interaction_fn, enforce_reflection_symmetry):
init_fn, xc_energy_density_fn = neural_xc.local_density_approximation(
stax.serial(stax.Dense(8), stax.Elu, stax.Dense(1)))
params_init = init_fn(rng=random.PRNGKey(0))
initial_state = self._create_testing_initial_state(interaction_fn)
next_state = scf.kohn_sham_iteration(
state=initial_state,
num_electrons=self.num_electrons,
xc_energy_density_fn=tree_util.Partial(
xc_energy_density_fn, params=params_init),
interaction_fn=interaction_fn,
enforce_reflection_symmetry=enforce_reflection_symmetry)
self._test_state(next_state, initial_state)
def test_kohn_sham_iteration_neural_xc_energy_loss_gradient(self):
# The network only has one layer.
# The initial params contains weights with shape (1, 1) and bias (1,).
init_fn, xc_energy_density_fn = neural_xc.local_density_approximation(
stax.serial(stax.Dense(1)))
init_params = init_fn(rng=random.PRNGKey(0))
initial_state = self._create_testing_initial_state(
utils.exponential_coulomb)
target_energy = 2.
spec, flatten_init_params = np_utils.flatten(init_params)
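    # Flatten the params pytree into a single 1d vector (`spec` records the
    # tree structure for unflattening) so the analytic gradient can be
    # compared entry-wise against scipy's approx_fprime, which operates on
    # flat arrays.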
def loss(flatten_params, initial_state, target_energy):
state = scf.kohn_sham_iteration(
state=initial_state,
num_electrons=self.num_electrons,
xc_energy_density_fn=tree_util.Partial(
xc_energy_density_fn,
params=np_utils.unflatten(spec, flatten_params)),
interaction_fn=utils.exponential_coulomb,
enforce_reflection_symmetry=True)
return (state.total_energy - target_energy) ** 2
grad_fn = jax.grad(loss)
params_grad = grad_fn(
flatten_init_params,
initial_state=initial_state,
target_energy=target_energy)
# Check gradient values.
np.testing.assert_allclose(params_grad, [-8.549952, -14.754195])
# Check whether the gradient values match the numerical gradient.
np.testing.assert_allclose(
optimize.approx_fprime(
xk=flatten_init_params,
f=functools.partial(
loss, initial_state=initial_state, target_energy=target_energy),
epsilon=1e-9),
params_grad, atol=2e-3)
def test_kohn_sham_iteration_neural_xc_density_loss_gradient(self):
# The network only has one layer.
# The initial params contains weights with shape (1, 1) and bias (1,).
init_fn, xc_energy_density_fn = neural_xc.local_density_approximation(
stax.serial(stax.Dense(1)))
init_params = init_fn(rng=random.PRNGKey(0))
initial_state = self._create_testing_initial_state(
utils.exponential_coulomb)
target_density = (
utils.gaussian(grids=self.grids, center=-0.5, sigma=1.)
+ utils.gaussian(grids=self.grids, center=0.5, sigma=1.))
spec, flatten_init_params = np_utils.flatten(init_params)
def loss(flatten_params, initial_state, target_density):
state = scf.kohn_sham_iteration(
state=initial_state,
num_electrons=self.num_electrons,
xc_energy_density_fn=tree_util.Partial(
xc_energy_density_fn,
params=np_utils.unflatten(spec, flatten_params)),
interaction_fn=utils.exponential_coulomb,
enforce_reflection_symmetry=False)
return jnp.sum(jnp.abs(state.density - target_density)) * utils.get_dx(
self.grids)
grad_fn = jax.grad(loss)
params_grad = grad_fn(
flatten_init_params,
initial_state=initial_state,
target_density=target_density)
# Check gradient values.
np.testing.assert_allclose(params_grad, [-1.34137017, 0.], atol=5e-7)
# Check whether the gradient values match the numerical gradient.
np.testing.assert_allclose(
optimize.approx_fprime(
xk=flatten_init_params,
f=functools.partial(
loss,
initial_state=initial_state,
target_density=target_density),
epsilon=1e-9),
params_grad, atol=2e-4)
def test_kohn_sham_iteration_neural_xc_density_loss_gradient_symmetry(self):
# The network only has one layer.
# The initial params contains weights with shape (1, 1) and bias (1,).
init_fn, xc_energy_density_fn = neural_xc.local_density_approximation(
stax.serial(stax.Dense(1)))
init_params = init_fn(rng=random.PRNGKey(0))
initial_state = self._create_testing_initial_state(
utils.exponential_coulomb)
target_density = (
utils.gaussian(grids=self.grids, center=-0.5, sigma=1.)
+ utils.gaussian(grids=self.grids, center=0.5, sigma=1.))
spec, flatten_init_params = np_utils.flatten(init_params)
def loss(flatten_params, initial_state, target_density):
state = scf.kohn_sham_iteration(
state=initial_state,
num_electrons=self.num_electrons,
xc_energy_density_fn=tree_util.Partial(
xc_energy_density_fn,
params=np_utils.unflatten(spec, flatten_params)),
interaction_fn=utils.exponential_coulomb,
enforce_reflection_symmetry=True)
return jnp.sum(jnp.abs(state.density - target_density)) * utils.get_dx(
self.grids)
grad_fn = jax.grad(loss)
params_grad = grad_fn(
flatten_init_params,
initial_state=initial_state,
target_density=target_density)
# Check gradient values.
np.testing.assert_allclose(params_grad, [-1.34137017, 0.], atol=5e-7)
# Check whether the gradient values match the numerical gradient.
np.testing.assert_allclose(
optimize.approx_fprime(
xk=flatten_init_params,
f=functools.partial(
loss,
initial_state=initial_state,
target_density=target_density),
epsilon=1e-9),
params_grad, atol=1e-3)
class KohnShamTest(parameterized.TestCase):
def setUp(self):
super(KohnShamTest, self).setUp()
self.grids = jnp.linspace(-5, 5, 101)
self.num_electrons = 2
self.locations = jnp.array([-0.5, 0.5])
self.nuclear_charges = jnp.array([1, 1])
def _create_testing_external_potential(self, interaction_fn):
return utils.get_atomic_chain_potential(
grids=self.grids,
locations=self.locations,
nuclear_charges=self.nuclear_charges,
interaction_fn=interaction_fn)
def _test_state(self, state, external_potential):
# The density in the final state should normalize to number of electrons.
self.assertAlmostEqual(
float(jnp.sum(state.density) * utils.get_dx(self.grids)),
self.num_electrons)
# The total energy should be finite after iterations.
self.assertTrue(jnp.isfinite(state.total_energy))
self.assertLen(state.hartree_potential, len(state.grids))
self.assertLen(state.xc_potential, len(state.grids))
# locations, nuclear_charges, external_potential, grids and num_electrons
# remain unchanged.
np.testing.assert_allclose(state.locations, self.locations)
np.testing.assert_allclose(state.nuclear_charges, self.nuclear_charges)
np.testing.assert_allclose(
external_potential, state.external_potential)
np.testing.assert_allclose(state.grids, self.grids)
self.assertEqual(state.num_electrons, self.num_electrons)
self.assertGreater(state.gap, 0)
@parameterized.parameters(utils.soft_coulomb, utils.exponential_coulomb)
def test_kohn_sham(self, interaction_fn):
state = scf.kohn_sham(
locations=self.locations,
nuclear_charges=self.nuclear_charges,
num_electrons=self.num_electrons,
num_iterations=3,
grids=self.grids,
# Use 3d LDA exchange functional and zero correlation functional.
xc_energy_density_fn=tree_util.Partial(
lambda density: -0.73855 * density ** (1 / 3)),
interaction_fn=interaction_fn)
for single_state in scf.state_iterator(state):
self._test_state(
single_state,
self._create_testing_external_potential(interaction_fn))
@parameterized.parameters(
(-1., [False, False, False]),
(jnp.inf, [True, True, True]),
)
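  # A negative tolerance can never be satisfied by a (non-negative) density
  # MSE, so no iteration is marked converged; an infinite tolerance is
  # satisfied from the first iteration onward.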
def test_kohn_sham_convergence(
self, density_mse_converge_tolerance, expected_converged):
state = scf.kohn_sham(
locations=self.locations,
nuclear_charges=self.nuclear_charges,
num_electrons=self.num_electrons,
num_iterations=3,
grids=self.grids,
# Use 3d LDA exchange functional and zero correlation functional.
xc_energy_density_fn=tree_util.Partial(
lambda density: -0.73855 * density ** (1 / 3)),
interaction_fn=utils.exponential_coulomb,
density_mse_converge_tolerance=density_mse_converge_tolerance)
np.testing.assert_allclose(state.converged, expected_converged)
@parameterized.parameters(utils.soft_coulomb, utils.exponential_coulomb)
def test_kohn_sham_neural_xc(self, interaction_fn):
init_fn, xc_energy_density_fn = neural_xc.local_density_approximation(
stax.serial(stax.Dense(8), stax.Elu, stax.Dense(1)))
params_init = init_fn(rng=random.PRNGKey(0))
state = scf.kohn_sham(
locations=self.locations,
nuclear_charges=self.nuclear_charges,
num_electrons=self.num_electrons,
num_iterations=3,
grids=self.grids,
xc_energy_density_fn=tree_util.Partial(
xc_energy_density_fn, params=params_init),
interaction_fn=interaction_fn)
for single_state in scf.state_iterator(state):
self._test_state(
single_state,
self._create_testing_external_potential(interaction_fn))
def test_kohn_sham_neural_xc_energy_loss_gradient(self):
# The network only has one layer.
# The initial params contains weights with shape (1, 1) and bias (1,).
init_fn, xc_energy_density_fn = neural_xc.local_density_approximation(
stax.serial(stax.Dense(1)))
init_params = init_fn(rng=random.PRNGKey(0))
target_energy = 2.
spec, flatten_init_params = np_utils.flatten(init_params)
def loss(flatten_params, target_energy):
state = scf.kohn_sham(
locations=self.locations,
nuclear_charges=self.nuclear_charges,
num_electrons=self.num_electrons,
num_iterations=3,
grids=self.grids,
xc_energy_density_fn=tree_util.Partial(
xc_energy_density_fn,
params=np_utils.unflatten(spec, flatten_params)),
interaction_fn=utils.exponential_coulomb)
final_state = scf.get_final_state(state)
return (final_state.total_energy - target_energy) ** 2
grad_fn = jax.grad(loss)
params_grad = grad_fn(flatten_init_params, target_energy=target_energy)
# Check gradient values.
np.testing.assert_allclose(params_grad, [-8.571627, -14.754749], atol=1e-6)
# Check whether the gradient values match the numerical gradient.
np.testing.assert_allclose(
optimize.approx_fprime(
xk=flatten_init_params,
f=functools.partial(loss, target_energy=target_energy),
epsilon=1e-8),
params_grad, atol=5e-3)
def test_kohn_sham_neural_xc_density_loss_gradient(self):
# The network only has one layer.
# The initial params contains weights with shape (1, 1) and bias (1,).
init_fn, xc_energy_density_fn = neural_xc.local_density_approximation(
stax.serial(stax.Dense(1)))
init_params = init_fn(rng=random.PRNGKey(0))
target_density = (
utils.gaussian(grids=self.grids, center=-0.5, sigma=1.)
+ utils.gaussian(grids=self.grids, center=0.5, sigma=1.))
spec, flatten_init_params = np_utils.flatten(init_params)
def loss(flatten_params, target_density):
state = scf.kohn_sham(
locations=self.locations,
nuclear_charges=self.nuclear_charges,
num_electrons=self.num_electrons,
num_iterations=3,
grids=self.grids,
xc_energy_density_fn=tree_util.Partial(
xc_energy_density_fn,
params=np_utils.unflatten(spec, flatten_params)),
interaction_fn=utils.exponential_coulomb,
density_mse_converge_tolerance=-1)
final_state = scf.get_final_state(state)
return jnp.sum(
jnp.abs(final_state.density - target_density)) * utils.get_dx(
self.grids)
grad_fn = jax.grad(loss)
params_grad = grad_fn(flatten_init_params, target_density=target_density)
# Check gradient values.
np.testing.assert_allclose(params_grad, [-1.596714, 0.], atol=2e-6)
# Check whether the gradient values match the numerical gradient.
np.testing.assert_allclose(
optimize.approx_fprime(
xk=flatten_init_params,
f=functools.partial(loss, target_density=target_density),
epsilon=1e-9),
params_grad, atol=3e-4)
class GetInitialDensityTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.states = scf.KohnShamState(
density=np.random.random((5, 100)),
total_energy=np.random.random(5,),
locations=np.random.random((5, 2)),
nuclear_charges=np.random.random((5, 2)),
external_potential=np.random.random((5, 100)),
grids=np.random.random((5, 100)),
num_electrons=np.random.randint(10, size=5))
def test_get_initial_density_exact(self):
np.testing.assert_allclose(
scf.get_initial_density(self.states, 'exact'),
self.states.density)
def test_get_initial_density_noninteracting(self):
initial_density = scf.get_initial_density(self.states, 'noninteracting')
self.assertEqual(initial_density.shape, (5, 100))
def test_get_initial_density_unknown(self):
with self.assertRaisesRegex(
ValueError, 'Unknown initialization method foo'):
scf.get_initial_density(self.states, 'foo')
if __name__ == '__main__':
absltest.main()
|
google-research/google-research
|
jax_dft/jax_dft/scf_test.py
|
Python
|
apache-2.0
| 27,277
|
[
"Gaussian"
] |
006ccde8567bef4e96bcb92c55e3f95466982d613e490d91434e0fa25548579a
|
"""Create genome index for BWA aligner."""
import shutil
from pathlib import Path
from plumbum import TEE
from resolwe.process import Cmd, DataField, DirField, FileField, Process, StringField
class BWAIndex(Process):
"""Create BWA genome index."""
slug = "bwa-index"
process_type = "data:index:bwa"
name = "BWA genome index"
requirements = {
"expression-engine": "jinja",
"executor": {
"docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"},
},
"resources": {
"cores": 1,
"memory": 16384,
},
}
category = "Genome index"
data_name = '{{ ref_seq.fasta.file|basename|default("?") }}'
version = "1.2.0"
class Input:
"""Input fields for BWAIndex."""
ref_seq = DataField(
"seq:nucleotide", label="Reference sequence (nucleotide FASTA)"
)
class Output:
"""Output fields to process BWAIndex."""
index = DirField(label="BWA index")
fastagz = FileField(label="FASTA file (compressed)")
fasta = FileField(label="FASTA file")
fai = FileField(label="FASTA file index")
species = StringField(label="Species")
build = StringField(label="Build")
def run(self, inputs, outputs):
"""Run analysis."""
basename = Path(inputs.ref_seq.output.fasta.path).name
assert basename.endswith(".fasta")
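        # Drop the 6-character ".fasta" suffix to get the index base name.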
name = basename[:-6]
index_dir = Path("BWA_index")
index_dir.mkdir()
shutil.copy(Path(inputs.ref_seq.output.fasta.path), Path.cwd())
shutil.copy(Path(inputs.ref_seq.output.fastagz.path), Path.cwd())
shutil.copy(Path(inputs.ref_seq.output.fai.path), Path.cwd())
args = [
"-p",
index_dir / f"{name}.fasta",
inputs.ref_seq.output.fasta.path,
]
return_code, _, _ = Cmd["bwa"]["index"][args] & TEE(retcode=None)
if return_code:
self.error("Error occurred while preparing the BWA index.")
outputs.index = index_dir.name
outputs.fasta = f"{name}.fasta"
outputs.fastagz = f"{name}.fasta.gz"
outputs.fai = f"{name}.fasta.fai"
outputs.species = inputs.ref_seq.output.species
outputs.build = inputs.ref_seq.output.build
|
genialis/resolwe-bio
|
resolwe_bio/processes/alignment/bwa_index.py
|
Python
|
apache-2.0
| 2,328
|
[
"BWA"
] |
63df2961dde98952d866b2625896f6485e68aea3a48599d12ae38434307a2972
|
"""Check options for all agents."""
import logging
import pytest
from DIRAC.tests.Utilities.assertingUtils import AgentOptionsTest
from DIRAC import S_OK
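# Each entry below pairs an agent module path with per-agent test options:
# 'IgnoreOptions' presumably lists ConfigTemplate options the check should
# skip, and 'SpecialMocks' maps names to mocked return values.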
AGENTS = [('DIRAC.AccountingSystem.Agent.NetworkAgent', {'IgnoreOptions': ['MaxCycles', 'MessageQueueURI',
'BufferTimeout']}),
('DIRAC.ConfigurationSystem.Agent.Bdii2CSAgent', {'IgnoreOptions': ['BannedCEs', 'BannedSEs', 'DryRun',
'AlternativeBDIIs', 'VO']}),
('DIRAC.ConfigurationSystem.Agent.GOCDB2CSAgent', {'IgnoreOptions': ['Cycles', 'DryRun']}),
('DIRAC.ConfigurationSystem.Agent.VOMS2CSAgent', {'IgnoreOptions': ['VO']}),
('DIRAC.DataManagementSystem.Agent.CleanFTSDBAgent', {'IgnoreOptions': ['DeleteGraceDays']}),
('DIRAC.DataManagementSystem.Agent.FTS3Agent', {}),
('DIRAC.DataManagementSystem.Agent.FTSAgent', {'IgnoreOptions': ['StageFiles', 'UseProxies', 'shifterProxy',
'FTSPlacementValidityPeriod',
'SubmitCommand',
'MonitorCommand', 'PinTime',
'MaxActiveJobsPerRoute',
'MaxRequests', 'MonitoringInterval',
'ProcessJobRequests']}),
('DIRAC.FrameworkSystem.Agent.CAUpdateAgent', {}),
('DIRAC.FrameworkSystem.Agent.MyProxyRenewalAgent', {'IgnoreOptions': ['MinValidity', 'ValidityPeriod',
'MinimumLifeTime',
'RenewedLifeTime']}),
('DIRAC.FrameworkSystem.Agent.ErrorMessageMonitor', {}),
('DIRAC.FrameworkSystem.Agent.SystemLoggingDBCleaner', {'IgnoreOptions': ['RemoveDate']}),
('DIRAC.FrameworkSystem.Agent.TopErrorMessagesReporter', {}),
('DIRAC.RequestManagementSystem.Agent.CleanReqDBAgent', {'IgnoreOptions': ['KickLimit', 'KickGraceHours',
'DeleteGraceDays']}),
('DIRAC.RequestManagementSystem.Agent.RequestExecutingAgent', {'IgnoreOptions': ['MaxProcess',
'ProcessTaskTimeout',
'RequestsPerCycle',
'OperationHandlers',
'MinProcess', 'MaxAttempts',
'ProcessPoolQueueSize',
'ProcessPoolSleep',
'FTSMode',
'OperationHandlers'],
'SpecialMocks': {'gConfig': S_OK([])}}),
('DIRAC.ResourceStatusSystem.Agent.CacheFeederAgent', {}),
('DIRAC.ResourceStatusSystem.Agent.ElementInspectorAgent', {}),
('DIRAC.ResourceStatusSystem.Agent.EmailAgent', {}),
('DIRAC.ResourceStatusSystem.Agent.SiteInspectorAgent', {}),
('DIRAC.ResourceStatusSystem.Agent.SummarizeLogsAgent', {}),
('DIRAC.ResourceStatusSystem.Agent.TokenAgent', {}),
('DIRAC.StorageManagementSystem.Agent.RequestFinalizationAgent', {}),
('DIRAC.StorageManagementSystem.Agent.RequestPreparationAgent', {}),
('DIRAC.StorageManagementSystem.Agent.StageMonitorAgent', {}),
('DIRAC.StorageManagementSystem.Agent.StageRequestAgent', {'IgnoreOptions': ['PinLifetime']}),
('DIRAC.TransformationSystem.Agent.DataRecoveryAgent', {}),
('DIRAC.TransformationSystem.Agent.InputDataAgent', {'IgnoreOptions': ['DateKey', 'TransformationTypes']}),
('DIRAC.TransformationSystem.Agent.MCExtensionAgent', {'IgnoreOptions': ['TransformationTypes',
'TasksPerIteration',
'MaxFailureRate',
'MaxWaitingJobs']}),
('DIRAC.TransformationSystem.Agent.TaskManagerAgentBase', {'IgnoreOptions': ['PluginLocation',
'BulkSubmission', 'shifterProxy',
'ShifterCredentials',
'maxNumberOfThreads']}),
('DIRAC.TransformationSystem.Agent.TransformationAgent', {'IgnoreOptions': ['PluginLocation',
'transformationStatus',
'MaxFiles', 'MaxFilesToProcess',
'TransformationTypes',
'ReplicaCacheValidity',
'NoUnusedDelay',
'maxThreadsInPool']}),
('DIRAC.TransformationSystem.Agent.TransformationCleaningAgent', {'IgnoreOptions': ['EnableFlag',
'shifterProxy']}),
('DIRAC.TransformationSystem.Agent.ValidateOutputDataAgent', {'IgnoreOptions': ['TransformationTypes',
'DirectoryLocations',
'TransfIDMeta']}),
# ('DIRAC.TransformationSystem.Agent.RequestTaskAgent', {}), # not inheriting from AgentModule
# ('DIRAC.TransformationSystem.Agent.WorkflowTaskAgent', {}), # not inheriting from AgentModule
('DIRAC.WorkloadManagementSystem.Agent.JobAgent', {'IgnoreOptions': ['FillingModeFlag', 'JobWrapperTemplate',
'MinimumTimeLeft']}),
('DIRAC.WorkloadManagementSystem.Agent.JobCleaningAgent', {}),
('DIRAC.WorkloadManagementSystem.Agent.PilotStatusAgent', {'IgnoreOptions': ['PilotAccountingEnabled',
'ClearPilotsDelay',
'ClearAbortedPilotsDelay']}),
('DIRAC.WorkloadManagementSystem.Agent.StalledJobAgent', {'IgnoreOptions': ['StalledTimeHours',
'FailedTimeHours',
'StalledJobsTolerantSites',
'Enable']}),
('DIRAC.WorkloadManagementSystem.Agent.StatesAccountingAgent', {}),
('DIRAC.WorkloadManagementSystem.Agent.StatesMonitoringAgent', {}),
('DIRAC.WorkloadManagementSystem.Agent.SiteDirector',
{'SpecialMocks': {'findGenericPilotCredentials': S_OK(('a', 'b'))}}),
# ('DIRAC.WorkloadManagementSystem.Agent.MultiProcessorSiteDirector', {}), # not inheriting from AgentModule
]
LOG = logging.getLogger('Test')
@pytest.mark.parametrize('agentPath, options', AGENTS)
def test_AgentOptions(agentPath, options, caplog, mocker):
"""Check that all options in ConfigTemplate are found in the initialize method, including default values."""
caplog.set_level(logging.DEBUG)
AgentOptionsTest(agentPath, options, mocker=mocker)
|
andresailer/DIRAC
|
ConfigurationSystem/test/Test_agentOptions.py
|
Python
|
gpl-3.0
| 8,791
|
[
"DIRAC"
] |
9d48ea083d75c5fbc960217dbc6bf632dc160df922f1dbe8c8fb19300829b00a
|
# -*- coding: utf-8 -*-
"""Batch processing and logic for organizing and binning multiple ADCPData objects.
Tools and methods for categorizing/manipulating/visualizing data in ADCPy/ADCPData
format. This module is dependent upon adcpy.
This code is open source, and defined by the included MIT Copyright License
Designed for Python 2.7; NumPy 1.7; SciPy 0.11.0; Matplotlib 1.2.0
2014-09 - First Release; blsaenz, esatel
"""
import numpy as np # numpy 1.7
import glob
import os
import csv
import scipy.stats as sp
#import scipy.signal as sps
import scipy.stats.morestats as ssm
from matplotlib.dates import num2date#,date2num,
import datetime
import adcpy
def average_transects(transects,dxy,dz,plotline=None,return_adcpy=True,
stats=True,plotline_from_flow=False,sd_drop=0):
"""
This method takes a list of input ADCPy transect objects, and:
1) Projects and re-grids each transect to either the input plotline, or a best
fit of available projected xy locations;
2) Bin-averages the re-gridded U,V, and W velocities of input ADCPTransectData
objects
Inputs:
transects = list of ADCPTransectData objects
dxy = new grid spacing in the xy (or plotline) direction
dz = new regular grid spacing in the z direction (downward for transects)
    plotline = optional designated line in the xy plane for projecting ensembles onto
return_adcpy = True: returns an ADCPData object containing averaged velocities
                   False: returns a 3D numpy array containing U,V,W gridded velocity
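    stats = True: also compute per-bin sample counts and standard deviations
      (stored as velocity_n and velocity_sd)
    plotline_from_flow = True: when no plotline is given, fit it from the
      ensemble flows rather than the xy positions
    sd_drop = standard-deviation drop threshold passed through to
      adcpy.util.bin_average()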
"""
n_transects = len(transects)
avg = transects[0].copy_minimum_data()
if n_transects > 1:
        # ugly brute-force method to find furthest points;
# ConvexHull-type approach is only available in more recent scipy
max_dist = 0.0
centers = [adcpy.util.centroid(a.xy) for a in transects]
for c1 in centers:
for c2 in centers:
max_dist = max(max_dist,adcpy.util.find_line_distance(c1,c2))
print "max dist:",max_dist
if max_dist > 30.0:
print 'WARNING: averaging transects with maximum centroid distance of %f m!'%max_dist
# gather position data for new grid generation
xy_data = np.vstack([transects[i].xy for i in range(n_transects)])
z_data = np.hstack([transects[i].bin_center_elevation for i in range(n_transects)])
# find common grid
if plotline is None:
if plotline_from_flow:
flows = transects[0].calc_ensemble_flow(range_from_velocities=False)
xy_line = adcpy.util.map_flow_to_line(xy_data,flows[:,0],flows[:,1])
else:
xy_line = adcpy.util.map_xy_to_line(xy_data)
else:
xy_line = plotline
# NEED logic around determining whether original data was negative down, positive up, etc
z_mm = np.array([np.max(z_data),np.min(z_data)])
(dd,xy_new_range,xy_new,z_new) = adcpy.util.new_xy_grid(xy_data,z_mm,dxy,dz,xy_line,True)
# initialize arrays
xy_bins = adcpy.util.find_regular_bin_edges_from_centers(xy_new_range)
z_bins = adcpy.util.find_regular_bin_edges_from_centers(z_new)
new_shape = [len(xy_new_range),len(z_new),3]
avg.velocity = np.empty(new_shape)
if stats:
avg.velocity_n = np.empty(new_shape)
avg.velocity_sd = np.empty(new_shape)
        # generate linear xy,z,velocities for bin averaging, perform bin averaging
for i in range(3):
bin_ave_inputs = []
for t in transects:
xx,yy,xy_range,xy_line = adcpy.util.find_projection_distances(t.xy,xy_line)
bin_ave_inputs.append(adcpy.util.xy_z_linearize_array(xy_range,
t.bin_center_elevation,
t.velocity[...,i]))
xy = np.hstack([bin_ave_inputs[j][0] for j in range(n_transects)])
z = np.hstack([bin_ave_inputs[j][1] for j in range(n_transects)])
values = np.hstack([bin_ave_inputs[j][2] for j in range(n_transects)])
bin_ave = adcpy.util.bin_average(xy,xy_bins,values,z,z_bins,return_stats=stats,sd_drop=sd_drop)
bin_ave = adcpy.util.un_flip_bin_average(xy_new_range,z_new,bin_ave)
if stats:
(avg.velocity[...,i],
avg.velocity_n[...,i],
avg.velocity_sd[...,i]) = bin_ave
else:
avg.velocity[...,i] = bin_ave[0]
# update adcpData object
avg.xy = xy_new
avg.bin_center_elevation = z_new
avg.n_ensembles = new_shape[0]
avg.n_bins = new_shape[1]
# report back
if return_adcpy:
avg.xy_srs = transects[0].xy_srs
sources = [transects[i].source for i in range(n_transects)]
avg.source = "\n".join(sources)
mtimes = [sp.nanmedian(transects[i].mtime) for i in range(n_transects)]
mtimes = np.array(filter(None,mtimes))
if mtimes.any():
avg.mtime = np.ones(new_shape[0],np.float64) * sp.nanmean(mtimes)
if plotline is not None:
plotlinestr = "[%f,%f],[%f,%f]"%(plotline[0,0],
plotline[0,1],
plotline[1,0],
plotline[1,1])
else:
plotlinestr='None'
avg.history_append('average_transects(dxy=%f,dz=%f,plotline=%s)'%(dxy,dz,plotlinestr))
return avg
else:
return avg.velocity
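# Illustrative usage sketch (hypothetical file names; assumes both transects
# were loaded via adcpy and already carry xy projections):
# t1 = adcpy.open_adcp('transect1r.000', file_type='ADCPRdiWorkhorseData')
# t2 = adcpy.open_adcp('transect2r.000', file_type='ADCPRdiWorkhorseData')
# avg = average_transects([t1, t2], dxy=2.0, dz=0.25)
# avg.velocity then holds bin-averaged U,V,W, shape [n_xy_bins, n_z_bins, 3]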
def write_csv_velocity_array(adcp,csv_filename,no_header=False,un_rotate_velocties=True):
"""
    Writes comma-delimited u,v,w velocities to a text file.
Inputs:
ADCP = ADCPData object
csv_filename = file path of output file
no_header = boolean, True = don't write header line
Returns:
nothing
"""
    # direct dump of numpy array - oops, requires numpy v1.8
    #np.savetxt(csv_filename,adcp.velocity,delimiter=",")
if un_rotate_velocties:
v = adcp.get_unrotated_velocity()
else:
v = adcp.velocity
with open(csv_filename, 'wb') as csvfile:
arraywriter = csv.writer(csvfile, delimiter=',',
quoting=csv.QUOTE_MINIMAL)
if not no_header:
if adcp.xy is not None:
arraywriter.writerow(['x']+[adcp.xy_srs])
arraywriter.writerow(adcp.xy[:,0].tolist())
arraywriter.writerow(['y'])
arraywriter.writerow(adcp.xy[:,1].tolist())
elif adcp.lonlat is not None:
arraywriter.writerow(['longitude'])
arraywriter.writerow(adcp.lonlat[:,0].tolist())
arraywriter.writerow(['latitude'])
arraywriter.writerow(adcp.lonlat[:,1].tolist())
arraywriter.writerow(['bin_center_elevation'])
arraywriter.writerow(adcp.bin_center_elevation.tolist())
arraywriter.writerow(['U'])
for i in range(adcp.n_ensembles):
arraywriter.writerow(v[i,:,0].tolist())
arraywriter.writerow(['V'])
for i in range(adcp.n_ensembles):
arraywriter.writerow(v[i,:,1].tolist())
arraywriter.writerow(['W'])
for i in range(adcp.n_ensembles):
arraywriter.writerow(v[i,:,2].tolist())
def write_csv_velocity_db(adcp,csv_filename,no_header=False,un_rotate_velocties=True):
"""
Writes comma-delimited ensemble-mean U,V
Inputs:
ADCP = ADCPData object
csv_filename = file path of output file
    no_header = boolean, True = don't write position data, False = write position data
Returns:
nothing
"""
    # direct dump of numpy array - oops, requires numpy v1.8
    #np.savetxt(csv_filename,adcp.velocity,delimiter=",")
if un_rotate_velocties:
v = adcp.get_unrotated_velocity()
else:
v = adcp.velocity
with open(csv_filename, 'wb') as csvfile:
arraywriter = csv.writer(csvfile, delimiter=',',
quoting=csv.QUOTE_MINIMAL)
if not no_header:
if adcp.xy is not None:
header = ['x [%s]'%adcp.xy_srs,'y [%s]'%adcp.xy_srs]
elif adcp.lonlat is not None:
header = ['longitude [degE]','latitude [degN]']
else:
print 'Error, input adcp has no position data - no file written'
return
header.extend(['z [m]','datetime','u [m/s]','v [m/s]','w [m/s]',])
arraywriter.writerow(header)
for i in range(adcp.n_ensembles):
for j in range(adcp.n_bins):
if adcp.mtime is None:
rec_time = 'None'
elif adcp.mtime[i] is None or np.isnan(adcp.mtime[i]):
rec_time = 'None'
else:
rec_time = num2date(adcp.mtime[i]).strftime('%c')
if adcp.xy is not None:
db_record = [adcp.xy[i,0],adcp.xy[i,1]]
else:
db_record = [adcp.lonlat[i,0], adcp.lonlat[i,1]]
db_record = db_record + [adcp.bin_center_elevation[j],
rec_time,
v[i,j,0],
v[i,j,1],
v[i,j,2]]
arraywriter.writerow(db_record)
def write_ensemble_mean_velocity_db(adcp,csv_filename,no_header=False,
un_rotate_velocties=True,elev_line=None,
range_from_velocities=False):
"""
    Writes comma-delimited velocities to a text file, optionally with xy-positions
    or lon-lat positions, and bin_center_elevations. For a 2D velocity array,
    the first (leftmost) axis is written horizontally.
Inputs:
ADCP = ADCPData object
csv_filename = file path of output file
    no_header = boolean, True = don't write position data, False = write position data
Returns:
nothing
"""
    # direct dump of numpy array - oops, requires numpy v1.8
    #np.savetxt(csv_filename,adcp.velocity,delimiter=",")
if un_rotate_velocties and adcp.rotation_angle is not None:
r_axis = adcp.rotation_axes
r_angle = adcp.rotation_angle
adcp.set_rotation(None)
UVW = adcp.ensemble_mean_velocity(elev_line=elev_line,
range_from_velocities=range_from_velocities)
adcp.set_rotation(r_angle,r_axis)
else:
UVW = adcp.ensemble_mean_velocity(elev_line=elev_line,
range_from_velocities=range_from_velocities)
with open(csv_filename, 'wb') as csvfile:
arraywriter = csv.writer(csvfile, delimiter=',',
quoting=csv.QUOTE_MINIMAL)
if not no_header:
if adcp.xy is not None:
header = ['x [%s]'%adcp.xy_srs,'y [%s]'%adcp.xy_srs]
elif adcp.lonlat is not None:
header = ['longitude [degE]','latitude [degN]']
else:
print 'Error, input adcp has no position data - no file written'
return
header.extend(['datetime','U [m/s]','V [m/s]'])
arraywriter.writerow(header)
for i in range(adcp.n_ensembles):
if adcp.mtime is None:
rec_time = 'None'
elif adcp.mtime[i] is None or np.isnan(adcp.mtime[i]):
rec_time = 'None'
else:
rec_time = num2date(adcp.mtime[i]).strftime('%c')
if adcp.xy is not None:
db_record = [adcp.xy[i,0],adcp.xy[i,1]]
else:
db_record = [adcp.lonlat[i,0], adcp.lonlat[i,1]]
db_record = db_record + [rec_time,
UVW[i,0],
UVW[i,1]]
arraywriter.writerow(db_record)
#def split_repeat_survey_into_transects(adcp_survey):
#
# if adcp_survey.xy is None:
# raise Exception,'ADCP data must have an XY projection for trasect detection and splitting'
# velocity_change = np.abs(adcp_survey.xy[1:-1,0]-adcp_survey.xy[0:-2,0]) + \
# np.abs(adcp_survey.xy[1:-1,1]-adcp_survey.xy[0:-2,1])
# sps.find_peaks_cwt(velocity_change, np.arange(1,10), wavelet=None, max_distances=None, gap_thresh=None, min_length=None, min_snr=1, noise_perc=10)
def group_adcp_obs_by_spacetime(adcp_obs,max_gap_m=30.0,
max_gap_minutes=20.0,max_group_size=6):
"""
Sorts ADCPData objects first into groups by closeness in terms of location,
and then further sorts location groups by time. Groups of ADCPData objects
must first be within max_gap_m from each other, and then be within max_gap_minutes
of each other.
Inputs:
adcp_obs = list of ADCPTransectData objects
max_gap_m = maximum distance allowed between ADCP observations when grouping
max_gap_minutes = maximum time allowed between ADCP observations when grouping
max_group_size = maximum number of ADCPData objects per group
Returns:
List of lists that contain groups of input ADCPData objects
"""
space_groups = group_adcp_obs_within_space(adcp_obs,max_gap_m)
for i in range(len(space_groups)):
print 'space group',i,'- ',len(space_groups[i]), 'observations'
spacetime_groups = []
for grp in space_groups:
(sub_groups, gaps) = group_adcp_obs_within_time(grp,
max_gap_minutes,
max_group_size)
spacetime_groups.extend(sub_groups)
for i in range(len(spacetime_groups)):
print 'spacetime group',i,'- ',len(spacetime_groups[i]), 'observations'
return spacetime_groups
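# Illustrative usage sketch (hypothetical list 'obs' of xy-projected
# ADCPTransectData objects); groups found here can be fed directly to
# average_transects():
# groups = group_adcp_obs_by_spacetime(obs, max_gap_m=30.0,
#                                      max_gap_minutes=20.0, max_group_size=6)
# for grp in groups:
#     avg = average_transects(grp, dxy=2.0, dz=0.25)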
def group_adcp_obs_within_space(adcp_obs,max_gap_m=30.0):
"""
Sorts ADCPData objects into groups by closeness in space, in an
    ordered-walk/brute-force manner. Distances between all ADCPData observation
    centroids are found, and then starting with the first ADCPData, the remaining
    ADCPData objects are evaluated for distance to the first. If within 'max_gap_m'
    they are grouped and marked as 'picked' so they will not be assigned to a group
more than once.
Inputs:
adcp_obs = list of ADCPTransectData objects
max_gap_m = maximum distance allowed between ADCP observations when grouping
Returns:
List of lists that contain groups of input ADCPData objects
"""
n_obs = len(adcp_obs)
(centers,distances) = find_centroid_distance_matrix(adcp_obs)
picked = np.zeros(n_obs,np.int)
groups = []
for i in range(n_obs):
if not picked[i] and ~np.isnan(centers[i][0,0]):
sub_group = [adcp_obs[i],]
picked[i] = 1
my_dist = distances[i,:]
nn = np.argsort(my_dist)
for n in nn:
if not picked[n] and ~np.isnan(centers[n][0,0]):
if my_dist[n] < max_gap_m:
sub_group.append(adcp_obs[n])
picked[n] = 1
groups.append(sub_group)
return groups
def group_adcp_obs_within_time(adcp_obs,max_gap_minutes=20.0,max_group_size=6):
"""
Sorts ADCPData objects into groups by closeness in time, with groups being
    separated by more than 'max_gap_minutes'. This method first sorts the group by
start time, and then splits the observations where they are more than
'max_gap_minutes' apart.
Inputs:
adcp_obs = list of ADCPTransectData objects
    max_gap_minutes = maximum time allowed between ADCP observations when grouping
max_group_size = maximum number of ADCPData objects per group
Returns:
List of lists that contain groups of input ADCPData objects
"""
if len(adcp_obs) == 1:
return ([adcp_obs,], [None,])
else:
start_times = list()
for a in adcp_obs:
if a.mtime is not None:
start_times.append(a.mtime[0])
else:
start_times.append(None)
if start_times:
gaps, nn, nnan = find_start_time_gaps(start_times)
adcp_obs_sorted = [ adcp_obs[i] for i in nn ]
            # convert nnan boolean array to integer indices of the non-nan entries
            nnan_i = np.flatnonzero(nnan)
            adcp_obs_sorted = [ adcp_obs_sorted[i] for i in nnan_i ]
            return group_according_to_gap(adcp_obs_sorted,gaps,max_gap_minutes,max_group_size)
else:
raise Exception,"find_transects_within_minimum_time_gap(): No valid times found in input files!"
def find_adcp_files_within_period(working_directory,max_gap=20.0,max_group_size=6):
"""
Sorts a directory of ADCPRdiWorkHorseData raw files into groups by
closeness in time, with groups being separated by more than
'max_gap_minutes'. This method first sorts the files by start time, and
then splits the observations where they are more than
'max_gap_minutes' apart.
Inputs:
working_directory = directory path containing ADCP raw or netcdf files
max_gap = maximum time allowed between ADCP observations when grouping (minutes)
max_group_size = maximum number of ADCPData objects per group
Returns:
List of lists that contain groups of input ADCPData objects
"""
if os.path.exists(working_directory):
data_files = glob.glob(os.path.join(working_directory,'*[rR].000'))
data_files.extend(glob.glob(os.path.join(working_directory,'*.nc')))
else:
print "Path (%s) not found - exiting."%working_directory
exit()
start_times = list()
for data_file in data_files:
try:
a = adcpy.open_adcp(data_file,
file_type="ADCPRdiWorkhorseData",
num_av=1)
start_times.append(a.mtime[0])
except:
start_times.append(None)
if start_times:
gaps, nn, nnan = find_start_time_gaps(start_times)
data_files_sorted = [ data_files[i] for i in nn ]
        # convert nnan boolean array to integer indices of the non-nan entries
        nnan_i = np.flatnonzero(nnan)
        data_files_sorted = [ data_files_sorted[i] for i in nnan_i ]
return group_according_to_gap(data_files_sorted,gaps,max_gap,max_group_size)
def find_start_time_gaps(start_times_list):
"""
    Find the time difference in minutes between datenum elements in a list.
    Sorts, removes NaNs, and turns the remaining datenum values in 'start_times_list'
    into datetime objects, finds the timedelta objects between them, and
converts to minutes.
Inputs:
start_times_list = numpy 1D array of matplotlib datenum values
Returns:
time_gaps_minutes = gaps between sorted times in start_times_list {minutes}
nn = sort index for start_times_list
    nnan = boolean index of start_times_list[nn] where True is non-nan
"""
# sort, remove unknowns, convert to datetime object
start_times = np.array(start_times_list, dtype=np.float64)
nn = np.argsort(start_times)
start_times_sorted = start_times[nn]
nnan = ~np.isnan(start_times_sorted)
start_times_sorted = num2date(start_times_sorted[nnan])
# returns datetime.timedelta objects
time_gaps_minutes = np.zeros(len(start_times_sorted)-1,np.float64)
for i in range(len(start_times_sorted)-1):
t_delta = start_times_sorted[i+1]-start_times_sorted[i]
# timedelta objects only have days/seconds
time_gaps_minutes[i] = t_delta.total_seconds()/60.0
return (time_gaps_minutes, nn, nnan)
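# Worked example (datenums are in days, so 0.01 day == 14.4 minutes;
# np.argsort places the NaN entry last):
# gaps, nn, nnan = find_start_time_gaps([736001.00, 736001.01, np.nan])
# gaps -> array([ 14.4]); nnan -> array([ True, True, False])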
def group_according_to_gap(flat_list,gaps,max_gap,max_group_size):
"""
Splits a python list into groups by their gaps in time, using a list of
gaps between them.
Inputs:
flat_list = python list, shape [n]
    gaps = numeric list, shape [n-1], describing gaps between elements of flat_list
max_gap = maximum gap allowed between list elements
max_group_size = maximum number of list elements per group
Returns:
List of lists that contain groups of input list elements
"""
within_gap = gaps <= max_gap
groups = list()
group_gaps = list()
sub_group = list()
sub_gaps = list()
sub_group.append(flat_list[0])
for i in range(len(gaps)):
if ~within_gap[i] or len(sub_group) >= max_group_size:
groups.append(sub_group)
if not sub_gaps:
sub_gaps.append((None,))
group_gaps.append(sub_gaps)
sub_group = []
sub_gaps = []
else:
sub_gaps.append(gaps[i])
sub_group.append(flat_list[i+1])
groups.append(sub_group)
if not sub_gaps:
sub_gaps.append((None,))
group_gaps.append(sub_gaps)
# returning (list of file lists, list of gap time lists)
return (groups, group_gaps)
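# Worked example: four files separated by gaps of 5, 45, and 10 minutes;
# with max_gap=20.0 the 45-minute gap splits them into two groups:
# groups, group_gaps = group_according_to_gap(['f1','f2','f3','f4'],
#                                             np.array([5.,45.,10.]), 20.0, 6)
# groups -> [['f1','f2'], ['f3','f4']]; group_gaps -> [[5.0], [10.0]]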
def calc_transect_flows_from_uniform_velocity_grid(adcp,depths=None,use_grid_only=False):
"""
Calculates the cross-sectional area of the ADCP profiles from projection
data, and multiplies it by the velocities to calculate flows
and mean velocities.
Inputs:
    adcp = ADCPData object, projected to a regular xy grid
    depths = optional 1D array of depths that correspond to the ensemble
      dimension of velocity in adcp
    use_grid_only = True: use each grid cell to calc flows/mean velocities
      False: first find depth-averaged velocities, then use depths to find
      flows/mean velocities
    Returns:
    scalar_mean_vel = mean velocity of total flow, shape [3] {m/s}
    depth_averaged_vel = depth averaged velocity, shape [n,3] {m/s}
    total_flow = total U,V, and W discharge [3] {m^3/s}
    total_survey_area = total area used for flow calculations {m^2}
"""
# check to see if adcp is child of ADCPTransectData ??
    if adcp.xy is None:
        raise ValueError("xy projection required")
if adcp.rotation_angle is None:
print 'Warning - No alignment axis set: Calculating flows according to U=East and V=North'
rfv = False
if not "bt_depth" in dir(adcp):
rfv = True
elif adcp.bt_depth is None:
rfv = True
xd,yd,dd,xy_line = adcpy.util.find_projection_distances(adcp.xy)
dxy = abs(dd[0]-dd[1])
dz = abs(adcp.bin_center_elevation[0]-adcp.bin_center_elevation[1])
(depths, velocity_mask) = adcp.get_velocity_mask(range_from_velocities=rfv,nan_mask=True)
if use_grid_only:
area_grid = velocity_mask*dxy*dz
total_survey_area = np.nansum(np.nansum(area_grid))
scalar_mean_vel = np.zeros(3)
total_flow = np.zeros(3)
depth_averaged_vel = np.zeros((adcp.n_ensembles,3))
for i in range(3):
total_flow[i] = np.nansum(np.nansum(adcp.velocity[:,:,i]*area_grid))
masked_vel = adcp.velocity[:,:,i]*velocity_mask
depth_averaged_vel[:,i] = sp.nanmean(masked_vel,axis=1)
scalar_mean_vel[i] = sp.nanmean(masked_vel.ravel())
else:
if rfv:
            print 'Warning - No bottom depth set: Calculating flows according to valid velocity bins only'
total_survey_area = np.nansum(dxy*depths)
depth_averaged_vel = adcp.ensemble_mean_velocity(range_from_velocities=rfv)
depth_integrated_flow = adcp.calc_ensemble_flow(range_from_velocities=rfv)
scalar_mean_vel = sp.nanmean(depth_averaged_vel,axis=0)
total_flow = np.nansum(depth_integrated_flow,axis=0)
return (scalar_mean_vel, depth_averaged_vel, total_flow, total_survey_area)
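# Illustrative call (hypothetical 'avg' is an xy-projected, regularly gridded
# ADCPData, e.g. the output of average_transects); units per the docstring:
# (mean_vel, depth_avg_vel, flow, area) = \
#     calc_transect_flows_from_uniform_velocity_grid(avg)
# print 'U discharge: %f m^3/s over %f m^2'%(flow[0], area)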
def find_centroid_distance_matrix(adcp_obs):
"""
    Calculates all possible distances between a list of ADCPData objects (twice... inefficient)
    Inputs:
    adcp_obs = list of ADCPData objects, shape [n]
    Returns:
    centers = list of centroids of ensemble locations of input ADCPData objects, shape [n]
    distances = xy distances between centers, shape [n,n]
"""
n_obs = len(adcp_obs)
distances = np.empty((n_obs,n_obs),np.float64)
centers = []
for a in adcp_obs:
if a.xy is not None:
centers.append(adcpy.util.centroid(a.xy))
else:
centers.append(np.array([np.nan,np.nan]))
for i in range(n_obs):
for j in range(n_obs):
distances[i,j] = adcpy.util.find_line_distance(centers[i],centers[j])
return (centers,distances)
def transect_rotate(adcp_transect,rotation,xy_line=None):
"""
Calculates all possible distances between a list of ADCPData objects (twice...ineffcient)
Inputs:
adcp_obs = list ADCPData objects, shape [n]
Returns:
centers = list of centorids of ensemble locations of input ADCPData objects, shape [n]
distances = xy distance between centers, shape [n-1]
"""
"""
Rotates ADCPTransectData U and V velocities.
Inputs:
adcp_transect = ADCPTransectData object
rotation = one of:
None - no rotation of averaged velocity profiles
'normal' - rotation based upon the normal to the plotline (default rotation type)
'pricipal flow' - uses the 1st principal component of variability in uv flow direction
'Rozovski' - individual rotation of each verticle velocity to maximize U
'no transverse flow' - rotation by the net flow vector is used to minnumize V
xy_line = numpy array of line defined by 2 points: [[x1,y1],[x2,y2]], or None
Returns
adcp_transect = ADCPTransectData object with rotated uv velocities
"""
if rotation == "normal":
# find angle of line:
if xy_line is None:
if adcp_transect.xy is None:
raise Exception,"transect_rotate() error: ADCPData must be xy projected, or input xy_line must be supplied for normal rotation"
xy_line = adcpy.util.map_xy_to_line(adcp_transect.xy)
theta = adcpy.util.calc_normal_rotation(xy_line)
elif rotation == "no transverse flow":
flows = adcp_transect.calc_ensemble_flow(range_from_velocities=True)
theta = adcpy.util.calc_net_flow_rotation(flows[:,0],flows[:,1])
elif rotation == "Rozovski":
flows = adcp_transect.calc_ensemble_flow(range_from_velocities=True)
theta = adcpy.util.calc_Rozovski_rotation(flows[:,0],flows[:,1])
elif rotation == "principal flow":
flows = adcp_transect.calc_ensemble_flow(range_from_velocities=True)
theta = adcpy.util.principal_axis(flows[:,0],flows[:,1],calc_type='EOF')
elif type(rotation) is str:
raise Exception,"In transect_rotate(): input 'rotation' string not understood: %s"%rotation
else:
theta = rotation
adcp_transect.set_rotation(theta,'uv')
return adcp_transect
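# Example rotations (hypothetical transect object 'tran'):
# tran = transect_rotate(tran, 'no transverse flow')  # align U with net flow
# tran = transect_rotate(tran, 0.5)  # or pass an explicit angle directly
#                                    # (units assumed to match set_rotation)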
def find_uv_dispersion(adcp):
"""
    Calculates dispersion coefficients of velocities in adcp according to
Fischer et al. 1979
Inputs:
adcp = ADCPTransectData object
Returns:
ustbar =
Kx_3i = horizontal dispersion coefficients
Ky_3i = lateral dispersion coefficients
"""
# should check to see if it is regular grid - required for dispersion calc
    if adcp.xy is None:
        raise ValueError("adcp.xy (xy projection) must exist for dispersion calculation")
    if 'bt_depth' in adcp.__dict__:
depth = adcp.bt_depth
else:
(depth, velocity_mask) = adcp.get_velocity_mask(range_from_velocities=True,
nan_mask=True)
xd,yd,dd,xy_line = adcpy.util.find_projection_distances(adcp.xy)
return adcpy.util.calcKxKy(adcp.velocity[:,:,0],
adcp.velocity[:,:,1],
dd,
adcp.bin_center_elevation,
depth)
|
esatel/ADCPy
|
adcpy/adcpy_recipes.py
|
Python
|
mit
| 28,389
|
[
"NetCDF"
] |
a8f1019fd0328492d2b37670e4a30537e6ed67e8955ba2b174a373cf59181982
|
# Author: Prabhu Ramachandran
# License: BSD style
# Copyright (c) 2004, Enthought, Inc.
""" A Traits-based wrapper for the Visualization Toolkit.
Part of the Mayavi project of the Enthought Tool Suite.
"""
from os.path import exists, join, dirname, isdir
# The tvtk wrapper code is all typically inside one zip file. We try to
# find this file and put it in __path__ and then create the 'tvtk' module
# wrapper from that. If the ZIP file is extracted into a tvtk_classes
# directory the ZIP file is not used and the tvtk_classes directory is
# inserted into sys.path and the directory contents are used for the tvtk
# classes -- note that you must have the following structure
# tvtk_classes/tvtk_classes/__init__.py. This is handy for tools like
# pydev (Eclipse).
# We add the path to the local __path__ here, in the __init__, so that
# the unpickler can directly unpickle the TVTK classes.
_zip = join(dirname(__file__), 'tvtk_classes.zip')
tvtk_class_dir = join(dirname(__file__), 'tvtk_classes')
if exists(tvtk_class_dir) and isdir(tvtk_class_dir):
    # Nothing to do, it will be imported anyhow.
pass
elif exists(_zip):
__path__.append(_zip)
|
dmsurti/mayavi
|
tvtk/__init__.py
|
Python
|
bsd-3-clause
| 1,169
|
[
"Mayavi"
] |
e56e8767744f61558604accc2d583ddfe2dc091774cc5526f9869ccaee70492b
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2018 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import collections
import numpy as np
from ..physconst import psi_bohr2angstroms
# assumed location of ValidationError (raised below for bad units/dtype);
# adjust if the exceptions module lives elsewhere in this package
from ..exceptions import ValidationError
def to_string(molrec, dtype, units='Angstrom', atom_format=None, ghost_format=None, width=17, prec=12): #, return_options=False):
"""Format a string representation of QM molecule.
Parameters
----------
molrec : dict
Psi4 json Molecule spec.
    dtype : {'xyz', 'cfour', 'nwchem'}
        Overall string format. Note that it's possible to request variations
        that don't fit the dtype spec so may not be re-readable (e.g., ghost
        and mass in nucleus label with 'xyz').
        'cfour' forces nucleus label, ignoring atom_format, ghost_format
units : {'Angstrom', 'Bohr'}
Units in which to write string. There is not an option to write in
intrinsic/input units. For `dtype='xyz', units='Bohr'` where the
format doesn't have a slot to specify units, "au" is added so that
        it is readable as 'xyz+'.
atom_format : str, optional
General format is '{elem}'. A format string that may contain fields
'elea' (-1 will be ''), 'elez', 'elem', 'mass', 'elbl' in any
arrangement. For example if a format naturally uses element symbol
and you want atomic number instead with mass info, too, pass
'{elez}@{mass}'. See `ghost_format` for handling field 'real'.
ghost_format : str, optional
General format is '@{elem}'. Like `atom_format`, but this formatter
is used when `real=False`. To suppress ghost atoms, use `ghost_format=''`.
width : int, optional
Field width for formatting coordinate float.
prec : int, optional
Number of decimal places for formatting coordinate float.
# return_options : bool, optional
# Some dtypes (cfour) can also return options knowable from `molrec`
Returns
-------
smol : str
String representation of the molecule.
# opts : dict
    # Only when `return_options=True`. Some formats (cfour) can also return options
"""
#funits, fiutau = process_units(molrec)
#molrec = self.to_dict(force_units=units, np_out=True)
if molrec['units'] == 'Angstrom' and units == 'Angstrom':
factor = 1.
elif molrec['units'] == 'Angstrom' and units == 'Bohr':
if 'input_units_to_au' in molrec:
factor = molrec['input_units_to_au']
else:
factor = 1. / psi_bohr2angstroms
elif molrec['units'] == 'Bohr' and units == 'Angstrom':
factor = psi_bohr2angstroms
elif molrec['units'] == 'Bohr' and units == 'Bohr':
factor = 1.
else:
raise ValidationError("""units must be 'Angstrom'/'Bohr', not {}""".format(units))
geom = np.array(molrec['geom']).reshape((-1, 3)) * factor
name = molrec.get('name', formula_generator(molrec['elem']))
tagline = """auto-generated by qcdb from molecule {}""".format(name)
if dtype == 'xyz':
atom_format = '{elem}' if atom_format is None else atom_format
ghost_format = '@{elem}' if ghost_format is None else ghost_format
atoms = _atoms_formatter(molrec, geom, atom_format, ghost_format, width, prec, 2)
nat = len(atoms)
first_line = """{}{}""".format(str(nat), ' au' if units == 'Bohr' else '')
smol = [first_line, name]
smol.extend(atoms)
elif dtype == 'cfour':
# Notes
# * losing identity of ghost atoms. picked up again in basis formatting
# * casting 'molecular_charge' to int
# * no spaces at the beginning of 1st/comment line is important
atom_format = '{elem}'
ghost_format = 'GH'
atoms = _atoms_formatter(molrec, geom, atom_format, ghost_format, width, prec, 2)
smol = [tagline]
smol.extend(atoms)
elif dtype == 'nwchem':
atom_format = '{elem}'
ghost_format = 'GH'
atoms = _atoms_formatter(molrec, geom, atom_format, ghost_format, width, prec, 2)
first_line = """geometry units {}""".format(units.lower())
# noautosym nocenter # no reorienting input geometry
fix_symm = molrec.get('fix_symmetry', None)
symm_line = ''
if fix_symm:
symm_line = 'symmetry {}'.format(fix_symm) # not quite what Jiyoung had
last_line = """end"""
smol = [first_line]
smol.extend(atoms)
smol.append(symm_line)
smol.append(last_line)
    else:
        raise ValidationError("""dtype must be 'xyz'/'cfour'/'nwchem', not {}""".format(dtype))
    return '\n'.join(smol)
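# Minimal usage sketch: a hand-built molrec carrying only the fields this
# module reads (a real Psi4 molrec contains more keys):
# molrec = {'units': 'Angstrom',
#           'geom': [0., 0., 0., 0., 0., 0.97],
#           'elea': [1, 16], 'elez': [1, 8], 'elem': ['H', 'O'],
#           'mass': [1.008, 15.999], 'elbl': ['', ''],
#           'real': [True, True], 'name': 'hydroxyl'}
# print(to_string(molrec, dtype='xyz'))  # -> "2\nhydroxyl\nH ...\nO ..."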
def _atoms_formatter(molrec, geom, atom_format, ghost_format, width, prec, sp):
"""Format a list of strings, one per atom from `molrec`."""
#geom = molrec['geom'].reshape((-1, 3))
nat = geom.shape[0]
fxyz = """{:>{width}.{prec}f}"""
sp = """{:{sp}}""".format('', sp=sp)
atoms = []
for iat in range(nat):
atom = []
atominfo = {'elea': '' if molrec['elea'][iat] == -1 else molrec['elea'][iat],
'elez': molrec['elez'][iat],
'elem': molrec['elem'][iat],
'mass': molrec['mass'][iat],
'elbl': molrec['elbl'][iat]}
if molrec['real'][iat]:
nuc = """{:{width}}""".format(atom_format.format(**atominfo), width=width)
atom.append(nuc)
else:
if ghost_format == '':
continue
else:
nuc = """{:{width}}""".format(ghost_format.format(**atominfo), width=width)
atom.append(nuc)
atom.extend([fxyz.format(x, width=width, prec=prec) for x in geom[iat]])
atoms.append(sp.join(atom))
return atoms
def formula_generator(elem):
"""Return simple chemical formula from element list `elem`.
    >>> formula_generator(['C', 'Ca', 'O', 'O', 'Ag'])
    'AgCCaO2'
"""
counted = collections.Counter(elem)
return ''.join((el if cnt == 1 else (el + str(cnt))) for el, cnt in sorted(counted.items()))
if __name__ == '__main__':
formula_generator(['C', 'Ca', 'O', 'O', 'Ag'])
|
amjames/psi4
|
psi4/driver/qcdb/molparse/to_string.py
|
Python
|
lgpl-3.0
| 6,850
|
[
"CFOUR",
"NWChem",
"Psi4"
] |
398ef010c511cb545b72a47170075e09f70bffadea951e02caacc1c2292c8e89
|
"""
Compute optical responses with OPTpy
"""
from OPTpy import OPTflow,Structure
#from myLRC import mkjob_by_task
flow = OPTflow(
dirname = './',
#
# Common variables:
#
prefix = 'gaas', # Root name for files required by Tiniba
# Structure from file:
structure=Structure.from_file('GaAs.cif'),
#
# Pseudopotentials:
#
pseudo_dir = './',
pseudos = ['31ga.3.hgh', '33as.5.hgh'],
ecut = 15.0, # Cutoff energy for wavefunctions
nspinor = 2, # Number of spinorial components
#
# Variables for density:
#
ngkpt = [4,4,4], # k-point grid for density
kshift = [[.5,.5,.5],[.5,0,0],[0,.5,0],[0,0,.5]], # k-point shift for density
#
# Variables for momentum matrix elements and responses:
#
kgrid_response=[4,4,4], # k-point grid for responses
nband=36, # Total number of bands
nbdbuf=2, # Bands in buffer (see Abinit documentation)
nval_total=8, # Total number of valence bands
#
# Variables for responses:
#
# Bands to include for transitions (can be less than for RPMNS)
ncond=8, # Number of conduction bands to include
nval=8, # (= nval_total) All valence bands must be included, not working yet for nval < nval_total
# Response to calculate, see Doc. in responses.py
response=21, #21 for SHG, 1 for linear response
components=["xyz"], #xyz tensor component
# WFN and RPMS calculation split by k-points
split_by_proc=False,
# Default parameters for the MPI runner.
# Please adapt them to your needs.
nproc = 16,
nproc_per_node = '',
mpirun = 'mpirun',
nproc_flag = '',
nproc_per_node_flag = '',
mpi_extra_vars='',
)
# Execution
# This is my local execution task:
flow.write()
#
# In case you need to load modules (in clusters),
# please modify this
#modules="\
#module swap gcc intel \n\
#module load openmpi mkl\n\
#module load intel/2013_sp1.4.211 openmpi hdf5/1.8.13-intel-p\n"
#
# Not yet working:
#flow.run()
#flow.report()
|
trangel/OPTpy
|
examples/flows/GaAs.py
|
Python
|
gpl-3.0
| 2,072
|
[
"ABINIT"
] |
f40172a7a4a182994e4af3c28a10a2ac3bf5989a2a5d66ed1f8e6ea9887d2393
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 11 08:08:52 2016
@author: tkc
"""
import re
from collections import defaultdict
import pandas as pd
import numpy as np
import scipy
import scipy.stats
from scipy import optimize
from math import factorial # used by Savgol matrix
from scipy.optimize import curve_fit
#%%
def organizecolumns(df1,mycols):
''' Pass df and template (list of desired columns in desired order) and return reorganized newdf
'''
cols1=df1.columns.tolist()
    newdf=df1 # drop() below returns a copy, so the passed df is not modified
    uniquelist=[i for i in cols1 if i not in mycols]
    for i,colname in enumerate(uniquelist): # remove cols from df1 that are absent from the mycols template
# newdf.drop(colname, axis=1, inplace=True) # this modifies both passed and returned dfs
newdf=newdf.drop(colname, axis=1)
newdf=newdf[mycols] # reorder columns based on template df
return newdf
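# e.g. organizecolumns(df, ['Filenumber','Areanumber']) returns a copy of df
# holding only those two columns, in that order (both assumed present in df)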
def parseelemlist(elemlist):
'''Find and separate multielement peaks to be averaged (e.g. Fe2 & Fe) from longer string of element peaks
e.g. splits "Mg Fe Fe2 Si" into "Mg Si" and "{Fe,[Fe,Fe2]} dictionary'''
# Strip numbers from strings within list
newlist=[re.match('\D+',i).group(0) for i in elemlist]
# find duplicated peaks (multiple peaks per element)
Multielem = defaultdict(list)
for i, item in enumerate(newlist):
Multielem[item].append(i)
Multielem = {k:v for k,v in Multielem.items() if len(v)>1} # dictionary with duplicated item and list with indices
duplist=list(Multielem.values()) # get list
duplist=[item for sublist in duplist for item in sublist] # single list with positions of duplicated elements
# now alter multipeak elements list to give dict with element and then list of peak for that element
for key,value in Multielem.items():
templist=value # dictionary value is list of elem peak index positions
peaklist=[]
for i, index in enumerate(templist): # create new list with original elem peak from index positions
peaklist.append(elemlist[index])
# now replace list of index positions with elempeak names
Multielem.update({key:peaklist}) # key will be multipeak element string i.e. "Fe"
# finally construct new single elements list with multipeak ones removed (handle each separately)
newelemlist=[]
for i in range(0,len(elemlist)):
if i not in duplist:
newelemlist.append(elemlist[i])
return newelemlist, Multielem
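# Worked example (hypothetical peak list):
# parseelemlist(['Mg','Fe','Fe2','Si'])
# -> (['Mg', 'Si'], {'Fe': ['Fe', 'Fe2']})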
def parseelem2(elemlist, Multielem):
''' After multielement peaks removed, also move secondary peaks used as primary to dict (handle separately)
e.g. splits "S Mg Fe2 Si" into "S Mg Si" and "{Fe,[Fe2]} dictionary; same structure and df output
for averaging of Fe, Fe2, or straight Fe2 or straight Fe'''
# starting elemlist will only have single entries (i.e Ti2 but not Ti & Ti2)
newelemlist=[]
for i, elem in enumerate(elemlist):
if re.search(r'\d',elem): # has number
match=re.search(r'\d',elem)
newkey=elem[0:match.start()]
# store alt quant (i.e. on Ti2) with same structure as multiple quant (Ti & Ti2)
# Another entry in multielement list... makes things easier for later quant comparisons
templist=[] # peakIDs added as list (of length 1)
templist.append(elem) # list containing single string (keeps identical data structure)
Multielem.update({newkey:templist}) # add to existing dictionary for separate handling
else:
newelemlist.append(elemlist[i]) # just copy over
return newelemlist, Multielem # return altered element list and multielem dictionary
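# Worked example (secondary peak Fe2 used as primary):
# parseelem2(['S','Mg','Fe2','Si'], {})
# -> (['S', 'Mg', 'Si'], {'Fe': ['Fe2']})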
def getelemthresholds(elemlist, AESquantparams):
'''get element-dependent significance thresholds for each peak from AESquantparams
return dictionary with element and associated significance level'''
thresholds={} # returns list of element dependent thresholds for this element set
for i, elem in enumerate(elemlist):
# find row in AESquantparams for this element
thiselemdata=AESquantparams[(AESquantparams['element']==elem)]
thiselemdata=thiselemdata.squeeze() # series with this elements params
thresholds.update({elem:thiselemdata.siglevel})
return thresholds
def cloneparamrows(df):
    ''' Make param log entry for each areanum - used by calccomposition to correctly process spe files with multiple spatial areas
    passed df is usually list of spe files
    this solves problem that AugerParamLog only has one entry (despite possibly having multiple distinct areas with different spectra)'''
df['Areanumber']=1 # set existing entries as area 1
mycols=df.dtypes.index
newrows=pd.DataFrame(columns=mycols) # blank df for new entries
for index, row in df.iterrows():
numareas=int(df.loc[index]['Areas'])
for i in range(2,numareas+1):
newrow=df.loc[index] # clone this row as series
newrow=newrow.set_value('Areanumber',i)
newrows=newrows.append(newrow)
df=pd.concat([df,newrows], ignore_index=True) # merge new rows with existing ones
df=df.sort_values(['Filenumber','Areanumber'])
return df
def calccomp(df, Integquantlog, elemlist, AESquantparams):
'''Calculate elemental composition of given files based on input element list
    threshold - ratio of element peak to noise peak (0 means no threshold applied)
load element-dependent significance level from AESquantparams'''
thresholds=getelemthresholds(elemlist, AESquantparams) # Get list of sigma levels for significance/inclusion
# thresholds for both single and multipeak
elemlist, multipeaklist = parseelemlist(elemlist) # list of single peak elements and dict with multipeaks
# check if any of the single peaks are secondary (i.e. quant on Fe2 not main Fe)
elemlist, multipeaklist= parseelem2(elemlist, multipeaklist)
# two element lists needed (elements with one peak and elements with compositions averaged from two peaks i.e. Fe2, Fe3)
# to process compositions from multiple areas, clone rows from spe log (one for each areanum)
df=cloneparamrows(df) # splits single entry for 5 spatial area spe into 5 rows with Areanumber 1-5
df=df.reset_index(drop=True)
df['AESbasis']=0.0 # resets to zero if already present from calcamplitude
mycols=['Filenumber', 'Project', 'Filename', 'FilePath', 'Sample', 'Comments','AESbasis','Areanumber']
for i, elem in enumerate(elemlist): # add columns for basis
df[elem]=0.0 # add col for each element to spelist
df['sig'+elem]=0.0 # copy peak significance (ratio of integrated counts over 1 sigma of background)
df['err'+elem]=0.0 # another for total error in adjusted counts basis
mycols.append(elem)
mycols.append('sig'+elem)
mycols.append('err'+elem)
for i,elem in enumerate(list(multipeaklist.keys())): # get elements (keys) from dict
df[elem]=0.0
df['sig'+elem]=0.0
df['err'+elem]=0.0
mycols.append(elem)
mycols.append('sig'+elem)
mycols.append('err'+elem)
for i, elem in enumerate(elemlist): # now add at.% columns (e.g. %S, %Mg)
colname='%'+elem # at % columns named %S, %Mg, etc.
errname='err%'+elem
mycols.append(colname) # add to column list template
mycols.append(errname)
df[colname]=0.0
df[errname]=0.0
for i,elem in enumerate(list(multipeaklist.keys())): # add multipeak elements
colname='%'+elem # at % columns named %S, %Mg, etc.
errname='err%'+elem
mycols.append(colname) # add to column list template
mycols.append(errname)
df[colname]=0.0
df[errname]=0.0
for i in range(0,len(df)): # loop through all desired spectrum (multiarea ones already have duplicated rows)
filenum=df.iloc[i]['Filenumber']
areanum=df.iloc[i]['Areanumber']
match=Integquantlog[Integquantlog['Filenumber']==filenum] # find integ data for this filenumber
match=match[match['Areanumber']==areanum]
basis=0.0 #
for j, elem in enumerate(elemlist): # handle the single peak elements
temp=match[match['Element']==elem] # finds entry for this element
if len(temp)==1:
# thresholds is dict with required significance level for each element
thisthresh=thresholds.get(elem) # sig level for this element
df=df.set_value(i, 'sig'+elem, temp.iloc[0]['Significance']) # always copy peak significance level
if temp.iloc[0]['Significance']>thisthresh: # if above set threshold then calculate elem's value and add to basis
df=df.set_value(i, elem, temp.iloc[0]['Adjcnts']) # copy adjusted counts of this element
df=df.set_value(i, 'err'+elem, temp.iloc[0]['Erradjcnts'])
basis+=temp.iloc[0]['Adjcnts'] # add this element's value to AES basis
# now handle the multipeak elements (get average value from both peaks)
for key, value in multipeaklist.items(): # key is element (aka colname in df), value is list of peaks in Smdifpeakslog
templist=value # dictionary value is list of elem peak index positions
numlines=len(templist) # this is number of lines that are average (i.e. 2 for Fe&Fe2)
avgval=0.0 # working value for averaged adjamplitude
erravgval=0.0 # combined error from erradjcnts of each line
for k, peak in enumerate(templist): # create new list with original elem peak from index positions
temp=match[match['Element']==peak] # finds integquantlog entry for this peak (match already trimmed to filenum and area)
if len(temp)==1:
thisthresh=thresholds.get(peak) # sig level for this element/peak
                    df=df.set_value(i, 'sig'+key, temp.iloc[0]['Significance']) # copy peak significance to this multipeak element's sig column
if temp.iloc[0]['Significance']>thisthresh:
avgval+=temp.iloc[0]['Adjcnts']
                        thiserrperc=(temp.iloc[0]['Erradjcnts']/temp.iloc[0]['Adjcnts'])**2
erravgval+=thiserrperc # sum of square of relative error
else:
numlines=numlines-1 # if peak is zeroed out and not added, this reduces # peaks in average
            if numlines>0: # avoid divbyzero if peak is too small
                avgval=avgval/numlines # this is now average basis for given element
                erravgval=np.sqrt(erravgval) # sqrt of sum of squares is relative error
            df=df.set_value(i, key, avgval) # copy adjusted amplitude of this element
            df=df.set_value(i, 'err'+key, avgval*erravgval) # combined actual error of this elem (as determined from multiple lines), written to the 'err'+elem column created above
            # add value from this element to AESbasis
            basis+=avgval
# end of multipeak elements loop
df=df.set_value(i, 'AESbasis', basis) # write total basis value to df
# Now compute at.% for each listed element (incl errors)
for j, elem in enumerate(elemlist):
colname='%'+elem
ratio=df.iloc[i][elem]/df.iloc[i]['AESbasis'] # initialized to zero in cases where peak is below significance threshold
df.set_value(i, colname, ratio)
            temp=match[match['Element']==elem] # again find this element's entry in the integration log
# TODO maybe check threshold again (although element's value will be zero)
if len(temp)==1:
thiserr=temp.iloc[0]['Erradjcnts']
atpercerr=thiserr/df.iloc[i]['AESbasis']
errname='err%'+elem # error column
df.set_value(i, errname, atpercerr) # Writes absolute error in at%
# Also calculate for elements w/ multiple peaks (if present)
for key, value in multipeaklist.items():
templist=value # dictionary value is list of elem peak index positions
numlines=len(templist) # this is number of lines that are average (i.e. 2 for Fe&Fe2)
colname='%'+key
ratio=df.iloc[i][key]/df.iloc[i]['AESbasis']
df.set_value(i, colname, ratio)
# TODO need to propagate errors through Fe & Fe2
errlist=[] # list of errors in % (usually max of two)
for k, peak in enumerate(templist): # create new list with original elem peak from index positions
temp=match[match['Element']==peak] # finds entry for this peak
if len(temp)==1:
if temp.iloc[0]['Adjcnts']>0: # skip negative values
err=temp.iloc[0]['Erradjcnts']/temp.iloc[0]['Adjcnts']
errlist.append(err) # add this to list
# combine errors in quadrature
totalerr=0.0
for j, err in enumerate(errlist):
totalerr+=err**2
totalerr=np.sqrt(totalerr) # percent error in at %
# now get actual error
thisval=df.iloc[i][key] # this is averaged value computed above (possibly zero if below thresholds )
thiserr=thisval*totalerr # error (in Fe) as actual value based on average of multiple peaks
atpercerr=thiserr/df.iloc[i]['AESbasis']
errname='err%'+ key # error column
df.set_value(i, errname, atpercerr) # Writes absolute error in at%
# end of loop calculation for each spectrum
# organize data based on mycols template
df=organizecolumns(df,mycols)
return df
def calcadjcounts(df, AESquantparams, sig=2, kerrors=True):
    '''For each elemental peak in interquantlog, calculate or recalculate adjusted counts using k-factor2 and mass
result stored in adjcnts column and used for subsequent compositional determinations
can change AESquantresults and recalc at any time; sig (aka 2 sigma errors) is default setting
kerrors -- include error associated with kfactor (along with Poisson errors)'''
if 'Adjcnts' not in df:
df['Adjcnts']=0.0 # new column for adjusted amplitude (if not already present)
if 'Erradjcnts' not in df:
df['Erradjcnts']=0.0 # new column for associated error
if 'err%cnts' not in df:
df['err%cnts']=0.0 # percentage error only from counting statistics (not including kfactor err)
    if 'err%total' not in df:
        df['err%total']=0.0 # total percentage error combining counting statistics and kfactor error
# loop for each element, mask df, get appropriate k-factor & mass
df=df.reset_index(drop=True) # go ahead and reset index
elemlist=np.ndarray.tolist(df.Element.unique()) # list of unique elements from df
for i,elem in enumerate(elemlist):
match=AESquantparams[(AESquantparams['element']==elem)]
match=match.reset_index(drop=True)
kfactor2=match.iloc[0]['kfactor2'] # kfactor and mass for this element/peak
errkf2=match.iloc[0]['errkf2'] # percent error in above for integ method
mass=match.iloc[0]['mass']
elemmask=(df['Element']==elem) # mask for this element in loop
for j in range(0,len(df)): # loop and set adjamplitude to amp*kfact/mass
if elemmask[j]==True: # row has this element
newval=df.iloc[j]['Integcounts']*kfactor2/mass
percerr=sig/np.sqrt(df.iloc[j]['Integcounts']) # 2/sqrt(N) is percent error
totalerr=np.sqrt(errkf2**2+percerr**2) # combine in quadrature
err=newval*totalerr # error value is adjusted counts * 2 sig error percentage
df=df.set_value(j,'Adjcnts',newval)
df=df.set_value(j,'err%cnts',percerr)
df=df.set_value(j,'err%total',totalerr)
df=df.set_value(j,'Erradjcnts',err)
return df
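# Numeric sketch of the adjustment model above (hypothetical values):
# Adjcnts = Integcounts*kfactor2/mass, relative errors combined in quadrature:
# Integcounts=10000, kfactor2=0.8, mass=55.85, errkf2=0.05, sig=2
# -> Adjcnts ~ 143.2, err%cnts = 2/sqrt(10000) = 0.02,
#    err%total = sqrt(0.05**2 + 0.02**2) ~ 0.0539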
''' TESTING
df=lowerfitpeak
'''
def makelinebackground(df, areanum, fitparams):
'''Create linear background under peak region
passed small slice of Augerfile df just peak region and small adjacent background '''
    if fitparams[0]=='n/a': # prior linregress problem
return df # return unmodified file
slope=fitparams[0]
intercept=fitparams[1]
backfitname='Backfit'+str(areanum)
for index,row in df.iterrows(): # blend between lower line and upper line
xval=df.loc[index]['Energy']
yval=slope*xval+intercept
df=df.set_value(index,backfitname,yval)
return df # return same df with interpolated background region added
def makeinterplinebackground(df, areanum, lowerfitparams, upperfitparams):
'''Create interpolated background from lower and upper peak fits
passed small slice of Augerfile df just peak region and small adjacent background '''
# check for n/a values
    if lowerfitparams[0]=='n/a' or upperfitparams[0]=='n/a': # prior linregress problem
return df # return unmodified file
lowslope=lowerfitparams[0]
lowintercept=lowerfitparams[1]
upslope=upperfitparams[0]
upintercept=upperfitparams[1]
backfitname='Backfit'+str(areanum)
if len(df)>0: # entire region passed should have no vals in backfit (only interpolated region)
        evstep=1.0/(len(df)+1) # float division (Python 2 integer division would give 0)
else:
print('Unspecified error in creating background')
return
startrow=df.iloc[0].name # index of first value
for index,row in df.iterrows(): # blend between lower line and upper line
xval=df.loc[index]['Energy']
yval=(1-evstep*(index-startrow))*(lowslope*xval+lowintercept)+evstep*(index-startrow)*(upslope*xval+upintercept)
df=df.set_value(index,backfitname,yval)
return df # return same df with interpolated background region added
def fitCapeak(df, areanum, elem, AugerFileName):
'''Pass appropriate chunk from Auger spectral dataframe, perform linear fit
return chunk with backfit column added '''
colname='Smcounts'+str(areanum)
backfitname='Backfit'+str(areanum)
xcol=df['Energy']
ycol=df[colname] # Counts1, Counts2 or whatever
# find relative minimum
try:
        parabfunc=lambda x, a, b, c: a*x**2 + b*x + c # lambda definition of 2nd order (parabolic) poly
fitparams, cov =curve_fit(parabfunc, xcol, ycol) # scipy optimize
ss_res=np.dot((ycol-parabfunc(xcol,*fitparams)), (ycol-parabfunc(xcol,*fitparams))) # dot product of diff between data and function
ymean=np.mean(ycol) # mean of dataset
ss_tot=np.dot((ycol-ymean),(ycol-ymean))
R2=1-(ss_res/ss_tot) # coeff of determination
# diagonal of covariance matrix contains variances for fit params
    except: # deal with common curve_fit failures
print('Fitting error for', elem, ' in file ', AugerFileName)
fitparams=('n/a','n/a','n/a') # return all n/a
R2='n/a'
return df, fitparams, R2
for index,row in df.iterrows():
xval=df.loc[index]['Energy']
yval= fitparams[0] * xval**2+ fitparams[1] * xval + fitparams[2]
df=df.set_value(index, backfitname, yval)
return df, fitparams, R2
def makeCabackground(df, areanum, fitparams):
    ''' Fill background col of auger spe file with values derived from 2nd order poly fit (pass region under peak
    not fitted by fitCapeak, which only grabs adjacent background)'''
backfitname='Backfit'+str(areanum)
if len(fitparams)!=3: # prior fitting error already reported via print
return df
A=fitparams[0]
B=fitparams[1]
C=fitparams[2]
for index,row in df.iterrows(): # blend between lower line and upper line
xval=df.loc[index]['Energy']
yval=A*xval**2+ B* xval +C
df=df.set_value(index,backfitname,yval)
return df
def fitcubic(df, areanum, elem, AugerFileName):
'''Pass appropriate chunk from Auger spectral dataframe, perform cubic fit
return chunk with backfit column added '''
colname='Smcounts'+str(areanum) # use smoothed data for background fits
backfitname='Backfit'+str(areanum)
xcol=df['Energy']
ycol=df[colname] # Counts1, Counts2 or whatever
# find relative minimum
try:
cubicfunc=lambda x, a, b, c, d: a*x**3 + b*x**2 + c*x + d # lambda definition of cubic poly
fitparams, cov =curve_fit(cubicfunc, xcol, ycol) # scipy optimize
ss_res=np.dot((ycol-cubicfunc(xcol,*fitparams)), (ycol-cubicfunc(xcol,*fitparams))) # dot product of diff between data and function
ymean=np.mean(ycol) # mean of dataset
ss_tot=np.dot((ycol-ymean),(ycol-ymean))
R2=1-(ss_res/ss_tot) # coeff of determination
# TODO insert special handling for failed fits (some R2 threshold)
# Maybe restrictions on curvature
    except: # deal with failed fit
        print('Fitting error for', elem, ' in file ', AugerFileName)
        fitparams=('n/a','n/a','n/a','n/a') # return all n/a
        R2='n/a'
        return df, fitparams, R2
for index,row in df.iterrows():
xval=df.loc[index]['Energy']
yval= fitparams[0] * xval**3+ fitparams[1] * xval**2 + fitparams[2] * xval + fitparams[3]
df=df.set_value(index, backfitname, yval)
return df, fitparams, R2
def makecubicbackground(df, areanum, fitparams):
    ''' Fill background col of auger spe file with values derived from 3rd order (cubic) poly fit (pass region under peak
    not fitted by fitcubic, which only grabs adjacent background)'''
backfitname='Backfit'+str(areanum)
if len(fitparams)!=4: # prior fitting error already reported via print
return df
A=fitparams[0]
B=fitparams[1]
C=fitparams[2]
D=fitparams[3]
for index,row in df.iterrows(): # blend between lower line and upper line
xval=df.loc[index]['Energy']
yval= A * xval**3+ B * xval**2 + C * xval + D
df=df.set_value(index,backfitname,yval)
return df
'''
For background fit testing
Augerfile=pd.read_csv('C2010W_18Nov15_12231225.csv')
areanum=1
elem=Elemdata[0][0]
fittype=Elemdata[0][1]
integpeak=Elemdata[0][2]
lower1=Elemdata[0][3]
lower2=Elemdata[0][4]
upper1=Elemdata[0][5]
upper2=Elemdata[0][6]
df=fitregion
Augerfile.to_csv('C2010W_18Nov15_12231225.csv', index=False)
'''
''' TESTING OF BELOW FITS
plt.plot(xcol,ycol,'b-') # actual data in blue
plt.plot(xcol,gaussian(fitparams, xcol),'r-') # Gaussian fit in red
'''
def fitgauss(df, areanum, width, elem, AugerFileName, addgauss=True):
    ''' Gaussian fit of direct peaks (pass Augerfile slice just around peak region);
    no need to save Gaussian fit, just return width and other params
    integwidth passed from AESquantparams value'''
peakname='Peaks'+str(areanum)
# Remove nan values from peak region
df=df.dropna(subset=[peakname]) # remove nan entries from peak
# estimate initial Gaussian parameters from data
    if df.empty: # deal with prior failed background fits (no data in this region after dropna)
print('Gaussian fitting error for', elem, ' peak in file ', AugerFileName)
fitparams=('n/a','n/a','n/a','n/a') # return all n/a
rsquared='n/a'
ier='n/a'
return df, fitparams, rsquared, ier
xc=df[peakname].idxmax() # estimate center based on peak max index
xc=df.loc[xc]['Energy'] # associated energy value near center
peakarea=df[peakname].sum() # decent area estimate
y0=0 #
params0=[xc,width,peakarea,y0] # initial params list (first guess at gaussian params)
xcol=df['Energy']
ycol=df[peakname] # Counts1, Counts2 or whatever
xcol=xcol.as_matrix() # convert both to numpy matrices
ycol=ycol.as_matrix()
# define standard gaussian funct (xc, width, area and yoffset are init params)
gaussian=lambda params, x: params[3]+params[2]/(params[1]*np.sqrt(2*np.pi))*np.exp(-((x-params[0])**2/(2*params[1]**2)))
# thisgauss= gaussian(params0,xcol)
errfunc=lambda p, xcol, ycol: ycol- gaussian(p,xcol) # lambda error funct definition
# sigma2FWHM = lambda sigma: sigma * sqrt(2 * log(2)) * 2 / sqrt(2) # convert Gaussian widths to FWHM?
try:
fitparams, cov, infodict, mesg, ier =optimize.leastsq(errfunc,params0,args=(xcol,ycol),full_output=True)
ss_err=(infodict['fvec']**2).sum()
ss_tot=((ycol-ycol.mean())**2).sum()
rsquared=1-(ss_err/ss_tot)
except: # fitting problem
print('Gaussian fitting error for', elem, ' peak in file ', AugerFileName)
fitparams=('n/a','n/a','n/a','n/a') # return all n/a
rsquared='n/a'
ier='n/a'
return df, fitparams, rsquared, ier
if addgauss==True:
gaussname="Gauss"+str(areanum)
df[gaussname]='' # add col for gaussian fit
for index,row in df.iterrows():
xval=df.loc[index]['Energy']
yval=fitparams[3]+fitparams[2]/(fitparams[1]*np.sqrt(2*np.pi))*np.exp(-((xval-fitparams[0])**2/(2*fitparams[1]**2)))
df.set_value(index,gaussname,yval)
return df, fitparams, rsquared, ier
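# Model fitted above: y = y0 + area/(width*sqrt(2*pi)) * exp(-(x-xc)**2/(2*width**2))
# fitparams order is [xc, width, area, y0]; 'width' is the Gaussian sigma (not FWHM)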
''' TESTING
For background fit testing
df=fitregion
Augerfile=pd.read_csv('C2010W_18Nov15_12231225.csv')
areanum=1
elem=Elemdata[1][0]
fittype=Elemdata[1][1]
integpeak=Elemdata[1][2]
lower1=Elemdata[1][3]
lower2=Elemdata[1][4]
upper1=Elemdata[1][5]
upper2=Elemdata[1][6]
integwidth=Elemdata[0][8]
if ier in [1,2,3,4]: print ('true')
'''
def findintegparams(Augerfile, Elements, AESquantparams, Shifts):
'''Grab integration width and expected counts peak position (also incorporates shift from deriv method)'''
halfwidths=[]
peakcenters=[]
Energyvals = Augerfile.Energy # for finding index #s corresponding to energy vals for this spectrum
for i, elem in enumerate(Elements):
thiselem=AESquantparams[AESquantparams['Element']==elem]
if len(thiselem)!=1:
print('WARNING ... AES quant parameters not found for ', elem)
halfwidths.append(4) # default integration width
peakcenters.append('n/a') #
return halfwidths, peakcenters
halfwidths.append(int((thiselem.iloc[0]['integwidth']-1)/2)) # integration uses half-width on either side of center
integpeakeV=thiselem.iloc[0]['negpeak']-thiselem.iloc[0]['integpeak']+Shifts[i] # shift of direct peak (defined relative to deriv peak)
temptuple=min(enumerate(Energyvals), key=lambda x: abs(x[1]-integpeakeV)) # tuple with index of closest and closest value
peakcenters.append(temptuple[0]) # first of tuple is closest index #
return halfwidths, peakcenters
def integpeaks(Augerfile, Backfitparams, areanum, Elements, Shifts, logmatch, AESquantparams):
    ''' Integration and Gaussian fitting for each direct peak; Shifts is a list of energy shifts of negpeak (same order as Elements).
    Opens source spectrum as Augerfile, fits peak backgrounds above and below using Elemdata, saves background to source csv
    (overwrites existing fits), and also saves linear fit params to log dataframe with position/amplitude/etc;
    desired elements out of data range are skipped (in prior findindices function).
    Backfitparams is all elements but only this Augerfile.
    '''
#create Smdifpeaks dataframe for temp storage of each peak's params
Backfitparams=Backfitparams.dropna(subset=['Rval1']) # skip integration/Gaussian fit if background fit failed
AugerFileName=logmatch.Filename #
# Create temp df to hold and pass linear fit data
mycols=['Filenumber', 'Filename', 'Filepath', 'Sample', 'Comments', 'Areanumber', 'Element', 'Integcounts',
'Backcounts', 'Significance', 'Xc', 'Width', 'Peakarea', 'Y0','Rsquared','Numchannels']
Integresults=pd.DataFrame(columns=mycols) # empty df for all integ results for elems in this spe file
peakname='Peaks'+str(areanum) # this is counts - background (but only calculated in vicinity of known elemental peaks)
backfitname='Backfit'+str(areanum)
# global shifts from smdifpeaks and local shift based on smoothed 2nd derivative
halfwidths, peakcenters=findintegparams(Augerfile, Elements, AESquantparams, Shifts)
# loop through and fit all peaks for each element in this spatial area
for i, elem in enumerate(Elements):
if i not in Backfitparams.index: # skips integ calc if backfit is n/a
continue
thisbackfit=Backfitparams[Backfitparams['Element']==elem]
if len(thisbackfit)!=1:
print('Problem retrieving fit boundaries for ',elem, ' in ', AugerFileName)
continue
lower1=thisbackfit.iloc[0]['Lower1']
upper2=thisbackfit.iloc[0]['Upper2']
fitregion=Augerfile[lower1:upper2+1]
if fitregion.empty==True: # skip if no data present (already should be skipped in Elemdata)
print('No data present for ', elem, ' in ', AugerFileName)
continue
# also need accurate lower/upper bounds ... available from backfitparams
Integresult=pd.DataFrame(index=np.arange(0,1),columns=mycols) # blank df row for this element
# get integpeak, kfact, integwidth, siglevel
# addgauss if save of gaussian peak fit in Augerfile is desired
# Probably could skip Gaussian fitting entirely if peak is weak (check smdiff)
fitregion, fitparams, rsquared, ier = fitgauss(fitregion, areanum, halfwidths[i], elem, AugerFileName, addgauss=True)
addgauss=True # maybe pass this arg from elsewhere
if addgauss==True and ier in [1,2,3,4]: # copy gaussian fit over to csv file if successful
gaussname="Gauss"+str(areanum)
if gaussname not in Augerfile.dtypes.index: # add col if not already present
Augerfile[gaussname]='' # add col for gaussian fit
# Copy gaussian fit to Augerfile... fitregion only modified in new Gauss peak fit column
Augerfile.loc[fitregion.index,fitregion.columns]=fitregion
# if gaussian fit is successful set center integration channel to index nearest xc
        # ier flag of 1,2,3,4 means leastsq succeeded, but the rsquared threshold is a better test of fit quality
if rsquared!='n/a': # skip integcounts calc but do put 'n/a' entries in df
if rsquared>0.4:
xc=fitparams[0] # center of gaussian fit
center=int(round(xc,0))
tempdf=fitregion[fitregion['Energy']==center]
try:
centerindex=tempdf[peakname].idxmax() # corresponding index # of peak maximum
except:
print('Gaussian fit center out of data range for ', elem, ' in ', AugerFileName)
# use center based on deriv shift and relative offset (index corresponding to integpeakeV)
centerindex=peakcenters[i] # backup method of finding center of integration region
else: # indication of poor Gaussian fit R2<0.4 (use prior knowledge of peak position)
print('Failed gaussian fit for ', elem, ' in ', AugerFileName)
# set center integration channel to value passed by integpeak
# this is ideal energy value but adjusted by shift found using smooth-diff quant method
centerindex=peakcenters[i] # already stores index number of central peak (ideal - sm-diff shift value)
# Still do the counts integration for poor gaussian fits
# perform integration over peak center channel + integwidth on either side
Augerpeak=Augerfile[centerindex-halfwidths[i]:centerindex+halfwidths[i]+1]
integcounts=Augerpeak[peakname].sum() # get counts sum
backgroundcnts=Augerpeak[backfitname].sum() # sum counts over identical width in background fit
# Used for peak significance i.e. typically 2 sigma of background integration over identical width
# full integ width is 1.2*FWHM but integwidth here is closest integer half-width
# Write fit params from tuple over to Integresult df
Integresult.iloc[0]['Integcounts']=integcounts
Integresult.iloc[0]['Backcounts']=backgroundcnts
Integresult.iloc[0]['Significance']=round(integcounts/(np.sqrt(backgroundcnts)),3)
# TODO add 2/sqrt(n) calc of associated percent error (also can calculate later)
Integresult.iloc[0]['Numchannels']=halfwidths[i]*2+1
Integresult.iloc[0]['Rsquared']=rsquared
Integresult.iloc[0]['Element']=elem
# These will be n/a if fit fails
Integresult.iloc[0]['Xc']=fitparams[0]
Integresult.iloc[0]['Width']=fitparams[1]
Integresult.iloc[0]['Peakarea']=fitparams[2]
Integresult.iloc[0]['Y0']=fitparams[3]
        Integresults=Integresults.append(Integresult, ignore_index=True) # add this element's row to the integration results
# end of loop through each element
# assign params that are common to all areas/all peaks into rows of df (copied from original log)
    for index,row in Integresults.iterrows():
        # use .loc[index, col]: chained .loc[index][col]/.iloc[index][col] assignment does not write back
        Integresults.loc[index,'Filenumber']=logmatch.Filenumber
        Integresults.loc[index,'Filename']=logmatch.Filename
        Integresults.loc[index,'Filepath']=logmatch.FilePath
        Integresults.loc[index,'Sample']=logmatch.Sample
        Integresults.loc[index,'Comments']=logmatch.Comments
        Integresults.loc[index,'Areanumber']=areanum
Integresults=Integresults[mycols] # put back in original order
return Augerfile, Integresults # df with direct peak fitting info for all areas/ all elements
''' TESTING BACKGROUNDS
elem, fittype, integpeak, lower1, lower2, upper1, upper2, kfact, integwidth, siglevel=Elemdata[5]
'''
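# Illustrative sketch (not part of the original module): the peak-significance metric used in
# integpeaks above is the integrated background-subtracted peak counts divided by sqrt of the
# background counts summed over the identical channels. The numbers below are hypothetical.
def _example_significance():
    import numpy as np
    integcounts = 5400.0   # hypothetical counts - background summed over peak window
    backcounts = 90000.0   # hypothetical background counts over the same window
    return round(integcounts / np.sqrt(backcounts), 3)  # -> 18.0, i.e. ~18 sigma above background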
def fitlinregions(Augerfile, fitbounds, areanum, maxshift, elem, AugerFileName):
    '''Simultaneously handle the linear fits below and above the peak of interest;
    uses residual/outlier detection to trim the boundaries of the linear fit regions (more robust than a derivative-threshold style);
    returns lower and upper slopes/intercepts;
    background is either a single linear fit or interpolated between the two.
    '''
cntname='Smcounts'+str(areanum) # can use Counts or Smcounts ... maybe smcounts is better for background fit
lowfitreg=Augerfile[fitbounds[0]:fitbounds[1]+1] # already tested for out-of-range in definefitreg
upfitreg=Augerfile[fitbounds[2]:fitbounds[3]+1]
# combine above and below regions (without possible added adjacent points)
xdata=np.concatenate((lowfitreg['Energy'][maxshift:-maxshift].as_matrix(),upfitreg['Energy'][maxshift:-maxshift].as_matrix()),axis=0)
ydata=np.concatenate((lowfitreg[cntname][maxshift:-maxshift].as_matrix(),upfitreg[cntname][maxshift:-maxshift].as_matrix()),axis=0)
# linear fit over both background regions
slope,intercept=np.polyfit(xdata, ydata, 1)
# manually compute residuals over entire range
xdata=np.concatenate((lowfitreg['Energy'].as_matrix(),upfitreg['Energy'].as_matrix()),axis=0)
ydata=np.concatenate((lowfitreg[cntname].as_matrix(),upfitreg[cntname].as_matrix()),axis=0)
yfit=slope*xdata+intercept
resid=np.subtract(ydata,yfit)
thresh=2*resid.std() # set threshold above which point is removed for having high residual
backregs=pd.concat([lowfitreg,upfitreg]) # combine both regions
backregs['Resid']=resid # paste in calculated residuals
excludelist=[] # clunky way to remove extra backfit points from fit if they exceed threshold (indicating beginning of a peak)
for index in range(int(lowfitreg.index.min()),int(lowfitreg.index.min())+maxshift):
if backregs.loc[index]['Resid']>thresh:
excludelist.append(index)
else:
break
for index in range(int(lowfitreg.index.max()),int(lowfitreg.index.max())-maxshift,-1):
if backregs.loc[index]['Resid']>thresh:
excludelist.append(index)
else:
break
for index in range(int(upfitreg.index.min()),int(upfitreg.index.min())+maxshift):
if backregs.loc[index]['Resid']>thresh:
excludelist.append(index)
else:
break
    for index in range(int(upfitreg.index.max()),int(upfitreg.index.max())-maxshift,-1): # high-energy side of upper region (original mistakenly reused lowfitreg here)
if backregs.loc[index]['Resid']>thresh:
excludelist.append(index)
else:
break
    # Return refined lower1/lower2/upper1/upper2 bounds plus low/high energy-range text strings showing fit extent
# now filter dataframe to remove any points indicating presence of peak
thismask=lowfitreg.index.isin(excludelist)
lowfitreg=lowfitreg.loc[~thismask]
thismask=upfitreg.index.isin(excludelist)
upfitreg=upfitreg.loc[~thismask]
fitbounds=[lowfitreg.index.min(), lowfitreg.index.max(),upfitreg.index.min(), upfitreg.index.max()] # return list of refined boundaries
energybounds=[lowfitreg.Energy.min(), lowfitreg.Energy.max(),upfitreg.Energy.min(), upfitreg.Energy.max()]
thismask=backregs.index.isin(excludelist) #
backregs=backregs.loc[~thismask]
# now remove temporary residuals column
backregs=backregs.drop('Resid', axis=1, inplace=False)
# Now can refit using expanded data range
backfitname='Backfit'+str(areanum)
xcol=backregs['Energy']
ycol=backregs[cntname] # Counts1, Counts2 or Smcounts1, 2 whatever
try:
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(xcol, ycol)
except: # deal with common problems with linregress
print('Fitting error for', elem, ' in file ', AugerFileName)
fitparams=('n/a','n/a','n/a','n/a','n/a') # return all n/a
        return backregs, fitparams, fitbounds, energybounds # return the dataframe unmodified on failure
    fitparams=(slope, intercept, r_value, p_value, std_err) # tuple to return fitting results
for index,row in backregs.iterrows():
xval=backregs.loc[index]['Energy']
yval=slope * xval + intercept
backregs=backregs.set_value(index, backfitname, yval)
# fitbounds is index # boundaries of lower and upper fits (list of 4)
# energy bounds is energy range over which background was fitted
return backregs, fitparams, fitbounds, energybounds
def definefitreg(bound1, bound2, maxshift, Augerfile, evbreaks):
''' Widen fit region from standard size (based on allowed maxshift) and ensure that multiplex evbreaks are not included in the region
also make sure one doesn't go into region with no data'''
lowbound=bound1
for i in range(bound1, bound1-maxshift-1, -1): # lower by allowed shift but ensure not at data boundary
if i not in evbreaks and i in Augerfile.index: # also ensure we don't exit boundaries of dataset here
lowbound=i
else:
break
upbound=bound2
    for i in range(bound2, bound2+maxshift+1): # raise by allowed shift but ensure not at data boundary
if i not in evbreaks and i in Augerfile.index:
upbound=i
else:
break
return lowbound, upbound # this is range of Auger slice that'll be used
def fitbackgrounds(Augerfile, areanum, Elements, Shifts, AESquantparams, logmatch):
    ''' For each element in the list, locate its Auger peak region in the given spe file and fit the background
    (linear above/below the peak, or an element-specific form such as the Ca polynomial);
    returns the modified Augerfile plus a Backfitparams dataframe logging fit boundaries and parameters.'''
# integpeak is position of direct peak relative to ideal negative peak in smooth-diff S7D7
# lower1,lower2 and upper1,upper2 are boundaries of lower and higher energy linear backgroundfit (again energies relative to negpeak)
# Incorporate shifting of background fit regions into this section from ideal position based on savgol deriv
Energyvals = Augerfile.Energy #
evbreaks=logmatch.Evbreaks # needed to ensure fit boundaries don't cross into adjacent element
tempstring=evbreaks.split('[')[1] # remove brackets from list
tempstring=tempstring.split(']')[0]
evbreaks=[int(s) for s in tempstring.split(',')] # convert string to list of break index values
AugerFileName=logmatch.Filename #
mycols=['Filenumber', 'Filename', 'Filepath', 'Sample', 'Comments', 'Date', 'Areanumber', 'Element', 'Lower1', 'Lower2', 'Upper1',
'Upper2', 'Lowrange','Highrange','Peakshift', 'Fittype', 'P1','P2','P3','P4','Rval1', 'Pval1', 'Stderr1','Rval2', 'Pval2', 'Stderr2']
Backfitparams=pd.DataFrame(columns=mycols) # empty df
for i, elem in enumerate(Elements):
# find row in AESquantparams for this element
thiselemdata=AESquantparams[(AESquantparams['element']==elem)]
thiselemdata=thiselemdata.squeeze() # series with this elements params
thisshift=Shifts[i] # shift in eV/index # corresponding to this peak from prior smdif quant
if thisshift=='n/a': # peak not in smdifpeakslog ... usually data out of range
thisshift=0 # just set shift to zero to avoid problems
# integ peak position value is relative to negpeak in smooth-diff (i.e. -5 is 5 eV below ideal negpeak)
integpeakev=thiselemdata.negpeak + thiselemdata.integpeak # ideal energy value of negative Auger peak in smooth-diff spectrum
lower1ev=thiselemdata.negpeak + thiselemdata.lower1 + thisshift # lower bound of lower energy fit region
lower2ev=thiselemdata.negpeak + thiselemdata.lower2 + thisshift # upper bound of lower energy fit region
upper1ev=thiselemdata.negpeak + thiselemdata.upper1 + thisshift # lower bound of higher energy fit region
upper2ev=thiselemdata.negpeak + thiselemdata.upper2 + thisshift # upper bound of higher energy fit region
# width=int(thiselemdata.searchwidth) # search width used to find actual peak in real data
# find index # for ideal neg and pos peaks... use lambda funct.
# min(Energyvals, key=lambda x:abs(x-negpeakev)) gives value but not index #
# convert each energy value into index # (global shift already applied)
temptuple=min(enumerate(Energyvals), key=lambda x: abs(x[1]-integpeakev)) # tuple with index of closest and closest value
integpeak=temptuple[0] # first of tuple is index #
peakinrange=temptuple[1]-integpeakev # should be ~0 if desired peak is in data range
if abs(peakinrange)>0.5: # Must skip entire desired element here if it's out of range of the data in this particular spe
print(elem,' is out of data range for ', AugerFileName)
continue
fitbounds=[]
temptuple=min(enumerate(Energyvals), key=lambda x: abs(x[1]-lower1ev)) # tuple with index of closest and closest value
fitbounds.append(temptuple[0]) # first of tuple is index #
temptuple=min(enumerate(Energyvals), key=lambda x: abs(x[1]-lower2ev)) # tuple with index of closest and closest value
fitbounds.append(temptuple[0]) # first of tuple is index #
temptuple=min(enumerate(Energyvals), key=lambda x: abs(x[1]-upper1ev)) # tuple with index of closest and closest value
fitbounds.append(temptuple[0]) # first of tuple is index #
temptuple=min(enumerate(Energyvals), key=lambda x: abs(x[1]-upper2ev)) # tuple with index of closest and closest value
fitbounds.append(temptuple[0]) # first of tuple is index #
maxshift=int(thiselemdata.peakshift) # get allowed max energy shift in channels (normally 1eV/chan)
fittype=thiselemdata.fittype # default type of peak fit for given element
if fittype=='line':
fitbounds[0], fitbounds[1]= definefitreg(fitbounds[0], fitbounds[1], maxshift, Augerfile, evbreaks) # bounds for lower fit region
fitbounds[2], fitbounds[3]= definefitreg(fitbounds[2], fitbounds[3], maxshift, Augerfile, evbreaks) # bounds for upper fit region
# return fitpeakdf (new background fits), fitparams (slope,intercept, point fit range), R2 val (for tossing vals)
# Since linear fit may span both, pass both regions and deal with them simultaneously
fitpeak, fitparams, fitbounds, energybounds=fitlinregions(Augerfile, fitbounds, areanum, maxshift, elem, AugerFileName)
            if fitparams[2]!='n/a': # fitparams[2] holds the r value; skip failed fits ('n/a')
Augerfile.loc[fitpeak.index,fitpeak.columns]=fitpeak # Copy/save to original file
# Need to generate values for actual peak region from single linear fit
thispeak=Augerfile[fitbounds[1]:fitbounds[2]] # parse to get actual determined peak region
thispeak=makelinebackground(thispeak, areanum, fitparams)
Augerfile.loc[thispeak.index,thispeak.columns]=thispeak # copy peak region to source data file
elif fittype=='Ca': # special treatment
# find relative minimum if present between C falling edge and Ca peak
smcountname='Smcounts'+str(areanum)
minindex=Augerfile[fitbounds[0]:fitbounds[0]+10][smcountname].idxmin() # index value of min left of Ca peak (counts or smoothed counts)
# minval=Augerfile[lower1:lower1+10][countname].min()
# maxindex=Augerfile[integpeak-5:integpeak+5][countname].idxmax() # Ca peak index if present
# maxval=Augerfile[integpeak-5:integpeak+5][countname].max()
# polynomial fit over two pts at relative min left of peak and small region right of peak
thispeak=pd.concat([Augerfile[minindex-1:minindex+1],Augerfile[integpeak+10:integpeak+15]])
# Grab several points on low energy side, 2-3 pts
            lowevrange=str(round(Augerfile[minindex-1:minindex+1]['Energy'].min(),0))+'-'+ str(round(Augerfile[minindex-1:minindex+1]['Energy'].max(),0)) # min-max (original repeated min)
# Get a few more at upper energy end
upperevrange=str(round(Augerfile[integpeak+10:integpeak+15]['Energy'].min(),0))+'-'+ str(round(Augerfile[integpeak+10:integpeak+15]['Energy'].max(),0))
thispeak, fitparams, R2 =fitCapeak(thispeak, areanum, elem, AugerFileName) # polynomial fit
if R2!='n/a': # only copy successful fits (skip n/a)
Augerfile.loc[thispeak.index,thispeak.columns]=thispeak # copy over to full spe file
thispeak=Augerfile[minindex+1:integpeak+11] # actual peak region
thispeak = makeCabackground(thispeak, areanum, fitparams) # now fill peak region with 2nd order poly background
Augerfile.loc[thispeak.index,thispeak.columns]=thispeak # copy peak region to source data file
# Make subtracted peak
countname='Counts'+str(areanum)
peakname='Peaks'+str(areanum)
backfitname='Backfit'+str(areanum)
for index in range(fitbounds[1],fitbounds[2]):
Augerfile.set_value(index, peakname, Augerfile.loc[index][countname]-Augerfile.loc[index][backfitname])
else:
print('Need to write fitting functions for fittype', fittype)
continue # next in loop to avoid errors below
# Make subtracted peak
countname='Counts'+str(areanum)
peakname='Peaks'+str(areanum)
backfitname='Backfit'+str(areanum)
for index in range(fitbounds[1],fitbounds[2]):
Augerfile.set_value(index, peakname, Augerfile.loc[index][countname]-Augerfile.loc[index][backfitname])
# Integration
# create single-rowed dataframe for backfitparams of this element (out-of-range data already skipped)
Backfitparamrow=pd.DataFrame(index=np.arange(0,1),columns=mycols)
# transfer common parameters
Backfitparamrow.iloc[0]['Areanumber']=areanum
Backfitparamrow.iloc[0]['Element']=elem
Backfitparamrow.iloc[0]['Peakshift']=Shifts[i] # shift of this elem's peak based on derivative method
Backfitparamrow.iloc[0]['Filenumber']=logmatch.Filenumber
Backfitparamrow.iloc[0]['Filename']=logmatch.Filename
Backfitparamrow.iloc[0]['Filepath']=logmatch.FilePath
Backfitparamrow.iloc[0]['Sample']=logmatch.Sample
Backfitparamrow.iloc[0]['Comments']=logmatch.Comments
Backfitparamrow.iloc[0]['Date']=logmatch.Date
Backfitparamrow.iloc[0]['Fittype']=fittype # string with type of background fit to attempt
if fittype=='line':
Backfitparamrow.iloc[0]['Lower1']=fitbounds[0] # save boundaries of fit regions
Backfitparamrow.iloc[0]['Lower2']=fitbounds[1]
Backfitparamrow.iloc[0]['Upper1']=fitbounds[2]
Backfitparamrow.iloc[0]['Upper2']=fitbounds[3]
Backfitparamrow.iloc[0]['Lowrange']=str(energybounds[0])+'-'+str(energybounds[1]) # string with lower fitted eV range
Backfitparamrow.iloc[0]['Highrange']=str(energybounds[2])+'-'+str(energybounds[3])# string with upper fitted eV range
Backfitparamrow.iloc[0]['P1']=fitparams[0] # slope for single fit
Backfitparamrow.iloc[0]['P2']=fitparams[1] # intercept for single fit
Backfitparamrow.iloc[0]['Rval1']=fitparams[2]
Backfitparamrow.iloc[0]['Pval1']=fitparams[3]
Backfitparamrow.iloc[0]['Stderr1']=fitparams[4]
if fittype=='Ca':
# copy from lowerfitparams to log df
Backfitparamrow.iloc[0]['Lowrange']=lowevrange
Backfitparamrow.iloc[0]['Highrange']=upperevrange
Backfitparamrow.iloc[0]['P1']=fitparams[0] # A*x2 coeff
Backfitparamrow.iloc[0]['P2']=fitparams[1] # B*x coeff
Backfitparamrow.iloc[0]['P3']=fitparams[2] # C coeff
Backfitparamrow.iloc[0]['Rval1']=R2
Backfitparams=Backfitparams.append(Backfitparamrow)
Backfitparams=Backfitparams[mycols]
return Augerfile, Backfitparams
def findfitregions(Augerfile, areanum, Elements, Shifts, AESquantparams, logmatch):
''' takes element strings and element list and returns tuple for each elem symbol containing all params necessary to find each Auger peak from given spe file
tuple for integ peak is symbol, ideal peak index #, and integ kfactor'''
# integpeak is position of direct peak relative to ideal negative peak in smooth-diff S7D7
# lower1,lower2 and upper1,upper2 are boundaries of lower and higher energy linear backgroundfit (again energies relative to negpeak)
# Incorporate shifting of background fit regions into this section from ideal position based on savgol deriv
Elemdata=[] # returns list of length5 tuples for all elements
Energyvals = Augerfile.Energy #
evbreaks=logmatch.Evbreaks # needed to ensure fit boundaries don't cross into adjacent element
tempstring=evbreaks.split('[')[1] # remove brackets from list
tempstring=tempstring.split(']')[0]
evbreaks=[int(s) for s in tempstring.split(',')] # convert string to list of break index values
for i, elem in enumerate(Elements):
# find row in AESquantparams for this element
thiselemdata=AESquantparams[(AESquantparams['element']==elem)]
thiselemdata=thiselemdata.squeeze() # series with this elements params
thisshift=Shifts[i] # shift in eV/index # corresponding to this peak from prior smdif quant
if thisshift=='n/a': # peak not in smdifpeakslog ... usually data out of range
thisshift=0 # just set shift to zero to avoid problems
# integ peak position value is relative to negpeak in smooth-diff (i.e. -5 is 5 eV below ideal negpeak)
integpeakev=thiselemdata.negpeak + thiselemdata.integpeak # ideal energy value of negative Auger peak in smooth-diff spectrum
lower1ev=thiselemdata.negpeak + thiselemdata.lower1 + thisshift # lower bound of lower energy fit region
lower2ev=thiselemdata.negpeak + thiselemdata.lower2 + thisshift # upper bound of lower energy fit region
upper1ev=thiselemdata.negpeak + thiselemdata.upper1 + thisshift # lower bound of higher energy fit region
upper2ev=thiselemdata.negpeak + thiselemdata.upper2 + thisshift # upper bound of higher energy fit region
# width=int(thiselemdata.searchwidth) # search width used to find actual peak in real data
# find index # for ideal neg and pos peaks... use lambda funct.
# min(Energyvals, key=lambda x:abs(x-negpeakev)) gives value but not index #
# convert each energy value into index # (global shift already applied)
temptuple=min(enumerate(Energyvals), key=lambda x: abs(x[1]-integpeakev)) # tuple with index of closest and closest value
integpeak=temptuple[0] # first of tuple is index #
peakinrange=temptuple[1]-integpeakev # should be ~0 if desired peak is in data range
        if abs(peakinrange)<1: # proceed only if desired peak lies within the data range of this spe; otherwise skip the element
temptuple=min(enumerate(Energyvals), key=lambda x: abs(x[1]-lower1ev)) # tuple with index of closest and closest value
lower1=temptuple[0] # first of tuple is index #
temptuple=min(enumerate(Energyvals), key=lambda x: abs(x[1]-lower2ev)) # tuple with index of closest and closest value
lower2=temptuple[0] # first of tuple is index #
temptuple=min(enumerate(Energyvals), key=lambda x: abs(x[1]-upper1ev)) # tuple with index of closest and closest value
upper1=temptuple[0] # first of tuple is index #
temptuple=min(enumerate(Energyvals), key=lambda x: abs(x[1]-upper2ev)) # tuple with index of closest and closest value
upper2=temptuple[0] # first of tuple is index #
shift=int(thiselemdata.peakshift) # get allowed max energy shift in channels (normally 1eV/chan)
tempparams =(lower1, lower2, upper1, upper2, shift) #
# Now call for adjustment of lower1,2 and upper 1,2 based on savgol column (single spe, single area, single elem region)
fitlimits = modfitreg(Augerfile, areanum, tempparams, evbreaks)
kfact=thiselemdata.kfactor2 # typical sensitivity k-factor associated with element for integration
siglevel=thiselemdata.siglevel # element dependent threshold for significance (# sigmas above background)
            integwidth=int((thiselemdata.integwidth-1)/2) # half-width in channels for direct integration of this element
            # integwidth in AESquantparams is the total # of channels; keep (n-1)/2 channels on either side of peak center
fittype=thiselemdata.fittype # default type of peak fit for given element
#Elemdata is a list (of length number of elements) containing length5 tuples
elemtuple=(elem, fittype, integpeak, fitlimits[0], fitlimits[1], fitlimits[2], fitlimits[3], kfact, integwidth, siglevel) # add tuple with info for this element
Elemdata.append(elemtuple) # now contains proper limits on fitting regions
else:
AugerFileName=logmatch.Filename # logmatch is series
print('Warning: No quant for ',elem,' for ',AugerFileName, 'data not collected in this energy range.')
return Elemdata
def findpeakshifts(logmatch, areanum, Smdifpeakslog, Elements):
''' Find shifts of negpeak positions for each element in list for single spe file, return as list of floats
pass series with filename and given area
'''
# TODO problem if len(Elements)!=len(Shifts) due to couldn't find peak error
filename=logmatch.Filename # get number from Series
thispeakslog= Smdifpeakslog[(Smdifpeakslog['Filename']==filename)&(Smdifpeakslog['Areanumber']==areanum)]
# need to match area number and file number for finding unique shift for this elem
Shifts=[] # shift in peak position suggested by smdiff quant method
for i, elem in enumerate(Elements):
thiselem= thispeakslog[(thispeakslog['PeakID']==elem)]
if len(thiselem)!=1: # peaks not present should have already been removed
print ("Couldn't find ", elem, " peak for area", str(areanum),"of spectrum ", filename)
Shifts.append('n/a') # keeps len(Elements)== len(Shifts)
if len(thiselem)==1: # should be match for all peaks that are present
shift=thiselem.iloc[0]['Shift']
Shifts.append(shift)
return Shifts # list of energy shift relative to ideal negpeak for each elemental peak
def makesavgol(df, areanum, evbreaks):
'''Perform python smooth-diff used to guide selection of background regions
perform this in chunks between evbreaks (list), works for survey or multiplex, adds col to Augerfile and returns
evbreaks is list of index #s
'''
countsname='Counts'+str(areanum)
# add savgol column (only called if not present)
savgolname='Savgol'+str(areanum)
df[savgolname]=0.0 # add/initialize col for 2nd deriv Sav-gol
# Add 1 to last region boundary to avoid data truncation problem
evbreaks[-1]=evbreaks[-1]+1
for i in range(1,len(evbreaks)): # region 1 to nth region
thisreg=df.loc[evbreaks[i-1]:evbreaks[i]-1] # slice into separate multiplex regions and process separately
thisreg=thisreg[countsname] # convert to Series (keep these index)
myarr=np.asarray(thisreg) # convert to numpy array
window_size=11
deriv=2
order=2 # order of savgol fit
rate=1
order_range = range(order+1) # range object
half_window = (window_size -1) // 2 # type int
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
# b is matrix 3 by window size
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv) # series as long as array
# linalg.pinv gets pseudo-inverse of a matrix (window-sized series)
# .A of any matrix returns it as ndarray object
# Pad the signal at the extremes with values taken from the signal itself
firstvals = myarr[0] - np.abs(myarr[1:half_window+1][::-1] - myarr[0] )
lastvals = myarr[-1] + np.abs(myarr[-half_window-1:-1][::-1] - myarr[-1])
myarr= np.concatenate((firstvals, myarr, lastvals))
# Now convolve input signal and sav-gol processing 1D array .. thisreg is numpy array w/ savgol results
myarr=np.convolve( myarr, m[::-1], mode='valid')
thisreg.loc[evbreaks[i-1]:evbreaks[i]-1]=myarr # copies numpy array but keeps same indices
# for loop endpoint is 1 off from df indexing (due to different inclusion rules for last point of range)
for index in range(evbreaks[i-1],evbreaks[i]):
df.set_value(index,savgolname,thisreg.loc[index]) # copy vals from series into entire spe df
return df # returns savitsky-golay smooth diff over same full region
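# Aside (illustrative only, not part of the original module): for window_size=11, order=2, deriv=2
# the hand-rolled coefficient/convolution code in makesavgol should closely agree with
# scipy.signal.savgol_filter in the interior of each region (edge handling differs: the code above
# reflect-pads from the signal itself, while scipy defaults to mode='interp').
def _example_savgol(counts):
    '''counts: 1D array (len >= 11) of raw counts for one multiplex region; returns smoothed 2nd derivative.'''
    from scipy.signal import savgol_filter
    return savgol_filter(counts, window_length=11, polyorder=2, deriv=2)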
def integbatchquant(spelist, Smdifpeakslog, AESquantparams, Elements, overwrite=True):
    ''' Batch quantification of all peaks in the Elements list for each spe file in spelist:
    fits backgrounds, Gaussian peaks, and direct integrations, and returns dataframes with the fit and integration results '''
# create empty dataframe for storing/passing linear fit params (same structure as in fitbackgrounds)
mycols=['Filenumber', 'Filename', 'Filepath', 'Sample', 'Comments', 'Date', 'Areanumber', 'Element', 'Lower1', 'Lower2', 'Upper1',
'Upper2', 'Lowrange','Highrange','Peakshift', 'Fittype', 'P1','P2','P3','P4','Rval1', 'Pval1', 'Stderr1','Rval2', 'Pval2', 'Stderr2']
Linearfitlog=pd.DataFrame(columns=mycols)
# TODO set up log for integration results
Integquantlog=pd.DataFrame(columns=['Filenumber', 'Filename', 'Filepath', 'Sample', 'Comments', 'Areanumber', 'Element', 'Integcounts',
'Backcounts', 'Significance', 'Xc', 'Width', 'Peakarea', 'Y0','Rsquared','Numchannels'])
for i in range(0,len(spelist)):
# get ith row from parameters log for subset of selected spe files (i.e. from spelist)
logmatch=spelist.iloc[i] #contains row with filename and all other parameters from a given spectra
logmatch=logmatch.squeeze() # convert/flatten to Series
numareas=int(logmatch.Areas) # get # of spatial areas for this spe
# load Auger spe file of interest here
AugerFileName=logmatch.Filename # get Auger filename from Series
Augerfile=pd.read_csv(AugerFileName) # read entire spectra into df
# now loop through any areas within this spectrum (typically only 1 area)
for areanum in range(1,numareas+1): # loop over each separate area in spe
# Now check to ensure this Augerfile has all necessary columns for this area
# print('Processing area ', areanum) TESTING
colname='Counts'+str(areanum)
if colname not in Augerfile:
print(colname, ' not present in file ', AugerFileName)
continue # skip to next area
backfitname='Backfit'+str(areanum)
if backfitname not in Augerfile: # add this background fit column if not present
Augerfile[backfitname]=np.nan
if overwrite==True: # clear all prior background regions
Augerfile[backfitname]=np.nan
savgolname='Savgol'+str(areanum) # Sav-gol 2nd deriv column used to guide selection of fitting regions
if savgolname not in Augerfile: # returns df with this Savgol column added
evbreaks=logmatch.Evbreaks # needed for possible savgol smooth-diff
tempstring=evbreaks.split('[')[1] # remove brackets from list
tempstring=tempstring.split(']')[0]
evbreaks=[int(s) for s in tempstring.split(',')] # convert string to list of break index values
Augerfile=makesavgol(Augerfile, areanum, evbreaks) # FUNCT pass full spectrum for given area (saved below)
peakname='Peaks'+str(areanum)
if peakname not in Augerfile: # add col for subtracted peak data
Augerfile[peakname]=np.nan
# Get list of negpeak shift for these elements (from Shift column of Smdifpeakslog)
Shifts=findpeakshifts(logmatch, areanum, Smdifpeakslog, Elements) # single shift val in eV for each elem
# Each area has its own Elemdata (selected background fit regions)
# Elemdata=findfitregions(Augerfile, areanum, Elements, Shifts, AESquantparams, logmatch)
Augerfile, Backfitparams=fitbackgrounds(Augerfile, areanum, Elements, Shifts, AESquantparams, logmatch)
# Linear background fits above and below plus interpolation between
# All params from linear fits of pre-peak and post-peak background stored in Backfitparams
# Peak gaussian fitting and integration subroutine
Augerfile, Integresults=integpeaks(Augerfile, Backfitparams, areanum, Elements, Shifts, logmatch, AESquantparams)
# append linear fit result from this spe/this area to longer master list
Linearfitlog=Linearfitlog.append(Backfitparams, ignore_index=True)
Integquantlog=Integquantlog.append(Integresults, ignore_index=True)
# direct save of modified auger csv with new linear background fits (after all areas processed)
Augerfile.to_csv(AugerFileName, index=False)
Linearfitlog=Linearfitlog[mycols] # put back in original order
return Linearfitlog, Integquantlog
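# Illustrative driver (a sketch, not part of the original module): integbatchquant expects the spe
# parameter log, the prior smooth-diff peak log, AESquantparams, and an element list. The file
# names and element list below are hypothetical placeholders.
def _example_batch_run():
    import pandas as pd
    spelist = pd.read_csv('spelist.csv')               # hypothetical subset of the Auger parameters log
    Smdifpeakslog = pd.read_csv('Smdifpeakslog.csv')   # prior smooth-diff quant results
    AESquantparams = pd.read_csv('AESquantparams.csv') # per-element peak/fit parameters
    Elements = ['Mg', 'Si', 'Fe']                      # hypothetical element list
    return integbatchquant(spelist, Smdifpeakslog, AESquantparams, Elements, overwrite=True)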
|
tkcroat/Augerquant
|
Development/Auger_integquant_functions_13Nov16.py
|
Python
|
mit
| 64,741
|
[
"Gaussian"
] |
ba5324fb257cb2c735836c990cfb30c4dc9669089c56e6f360a807e21de5a1f9
|
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
import numpy as np
from pyscf import lib
from pyscf.pbc import gto as pgto
import pyscf.pbc.dft as pdft
from pyscf.pbc.df import fft, aft, mdf
##################################################
#
# port from ao2mo/eris.py
#
##################################################
from pyscf import lib
from pyscf.pbc import lib as pbclib
from pyscf.pbc.dft.gen_grid import gen_uniform_grids
from pyscf.pbc.dft.numint import eval_ao
from pyscf.pbc import tools
einsum = np.einsum
r"""
(ij|kl) = \int dr1 dr2 i*(r1) j(r1) v(r12) k*(r2) l(r2)
= (ij|G) v(G) (G|kl)
i*(r) j(r) = 1/N \sum_G e^{iGr} (G|ij)
= 1/N \sum_G e^{-iGr} (ij|G)
"forward" FFT:
(G|ij) = \sum_r e^{-iGr} i*(r) j(r) = fft[ i*(r) j(r) ]
"inverse" FFT:
(ij|G) = \sum_r e^{iGr} i*(r) j(r) = N * ifft[ i*(r) j(r) ]
= conj[ \sum_r e^{-iGr} j*(r) i(r) ]
"""
def general(cell, mo_coeffs, kpts=None, compact=0):
'''pyscf-style wrapper to get MO 2-el integrals.'''
assert len(mo_coeffs) == 4
if kpts is not None:
assert len(kpts) == 4
return get_mo_eri(cell, mo_coeffs, kpts)
def get_mo_eri(cell, mo_coeffs, kpts=None):
'''Convenience function to return MO 2-el integrals.'''
mo_coeff12 = mo_coeffs[:2]
mo_coeff34 = mo_coeffs[2:]
if kpts is None:
kpts12 = kpts34 = q = None
else:
kpts12 = kpts[:2]
kpts34 = kpts[2:]
q = kpts12[0] - kpts12[1]
#q = kpts34[1] - kpts34[0]
if q is None:
q = np.zeros(3)
mo_pairs12_kG = get_mo_pairs_G(cell, mo_coeff12, kpts12)
mo_pairs34_invkG = get_mo_pairs_invG(cell, mo_coeff34, kpts34, q)
return assemble_eri(cell, mo_pairs12_kG, mo_pairs34_invkG, q)
def get_mo_pairs_G(cell, mo_coeffs, kpts=None, q=None):
'''Calculate forward (G|ij) FFT of all MO pairs.
TODO: - Implement simplifications for real orbitals.
Args:
mo_coeff: length-2 list of (nao,nmo) ndarrays
The two sets of MO coefficients to use in calculating the
product |ij).
Returns:
mo_pairs_G : (ngrids, nmoi*nmoj) ndarray
The FFT of the real-space MO pairs.
'''
coords = gen_uniform_grids(cell)
if kpts is None:
q = np.zeros(3)
aoR = eval_ao(cell, coords)
ngrids = aoR.shape[0]
if np.array_equal(mo_coeffs[0], mo_coeffs[1]):
nmoi = nmoj = mo_coeffs[0].shape[1]
moiR = mojR = einsum('ri,ia->ra', aoR, mo_coeffs[0])
else:
nmoi = mo_coeffs[0].shape[1]
nmoj = mo_coeffs[1].shape[1]
moiR = einsum('ri,ia->ra', aoR, mo_coeffs[0])
mojR = einsum('ri,ia->ra', aoR, mo_coeffs[1])
else:
if q is None:
q = kpts[1]-kpts[0]
aoR_ki = eval_ao(cell, coords, kpt=kpts[0])
aoR_kj = eval_ao(cell, coords, kpt=kpts[1])
ngrids = aoR_ki.shape[0]
nmoi = mo_coeffs[0].shape[1]
nmoj = mo_coeffs[1].shape[1]
moiR = einsum('ri,ia->ra', aoR_ki, mo_coeffs[0])
mojR = einsum('ri,ia->ra', aoR_kj, mo_coeffs[1])
#mo_pairs_R = einsum('ri,rj->rij', np.conj(moiR), mojR)
mo_pairs_G = np.zeros([ngrids,nmoi*nmoj], np.complex128)
fac = np.exp(-1j*np.dot(coords, q))
for i in range(nmoi):
for j in range(nmoj):
mo_pairs_R_ij = np.conj(moiR[:,i])*mojR[:,j]
mo_pairs_G[:,i*nmoj+j] = tools.fftk(mo_pairs_R_ij, cell.mesh, fac)
return mo_pairs_G
def get_mo_pairs_invG(cell, mo_coeffs, kpts=None, q=None):
'''Calculate "inverse" (ij|G) FFT of all MO pairs.
TODO: - Implement simplifications for real orbitals.
Args:
mo_coeff: length-2 list of (nao,nmo) ndarrays
The two sets of MO coefficients to use in calculating the
product |ij).
Returns:
mo_pairs_invG : (ngrids, nmoi*nmoj) ndarray
The inverse FFTs of the real-space MO pairs.
'''
coords = gen_uniform_grids(cell)
if kpts is None:
q = np.zeros(3)
aoR = eval_ao(cell, coords)
ngrids = aoR.shape[0]
if np.array_equal(mo_coeffs[0], mo_coeffs[1]):
nmoi = nmoj = mo_coeffs[0].shape[1]
moiR = mojR = einsum('ri,ia->ra', aoR, mo_coeffs[0])
else:
nmoi = mo_coeffs[0].shape[1]
nmoj = mo_coeffs[1].shape[1]
moiR = einsum('ri,ia->ra', aoR, mo_coeffs[0])
mojR = einsum('ri,ia->ra', aoR, mo_coeffs[1])
else:
if q is None:
q = kpts[1]-kpts[0]
aoR_ki = eval_ao(cell, coords, kpt=kpts[0])
aoR_kj = eval_ao(cell, coords, kpt=kpts[1])
ngrids = aoR_ki.shape[0]
nmoi = mo_coeffs[0].shape[1]
nmoj = mo_coeffs[1].shape[1]
moiR = einsum('ri,ia->ra', aoR_ki, mo_coeffs[0])
mojR = einsum('ri,ia->ra', aoR_kj, mo_coeffs[1])
#mo_pairs_R = einsum('ri,rj->rij', np.conj(moiR), mojR)
mo_pairs_invG = np.zeros([ngrids,nmoi*nmoj], np.complex128)
fac = np.exp(1j*np.dot(coords, q))
for i in range(nmoi):
for j in range(nmoj):
mo_pairs_R_ij = np.conj(moiR[:,i])*mojR[:,j]
mo_pairs_invG[:,i*nmoj+j] = np.conj(tools.fftk(np.conj(mo_pairs_R_ij), cell.mesh, fac))
return mo_pairs_invG
def get_mo_pairs_G_old(cell, mo_coeffs, kpts=None, q=None):
'''Calculate forward (G|ij) and "inverse" (ij|G) FFT of all MO pairs.
TODO: - Implement simplifications for real orbitals.
Args:
mo_coeff: length-2 list of (nao,nmo) ndarrays
The two sets of MO coefficients to use in calculating the
product |ij).
Returns:
mo_pairs_G, mo_pairs_invG : (ngrids, nmoi*nmoj) ndarray
The FFTs of the real-space MO pairs.
'''
coords = gen_uniform_grids(cell)
if kpts is None:
q = np.zeros(3)
aoR = eval_ao(cell, coords)
ngrids = aoR.shape[0]
if np.array_equal(mo_coeffs[0], mo_coeffs[1]):
nmoi = nmoj = mo_coeffs[0].shape[1]
moiR = mojR = einsum('ri,ia->ra', aoR, mo_coeffs[0])
else:
nmoi = mo_coeffs[0].shape[1]
nmoj = mo_coeffs[1].shape[1]
moiR = einsum('ri,ia->ra', aoR, mo_coeffs[0])
mojR = einsum('ri,ia->ra', aoR, mo_coeffs[1])
else:
if q is None:
q = kpts[1]-kpts[0]
aoR_ki = eval_ao(cell, coords, kpt=kpts[0])
aoR_kj = eval_ao(cell, coords, kpt=kpts[1])
ngrids = aoR_ki.shape[0]
nmoi = mo_coeffs[0].shape[1]
nmoj = mo_coeffs[1].shape[1]
moiR = einsum('ri,ia->ra', aoR_ki, mo_coeffs[0])
mojR = einsum('ri,ia->ra', aoR_kj, mo_coeffs[1])
mo_pairs_R = np.einsum('ri,rj->rij', np.conj(moiR), mojR)
mo_pairs_G = np.zeros([ngrids,nmoi*nmoj], np.complex128)
mo_pairs_invG = np.zeros([ngrids,nmoi*nmoj], np.complex128)
fac = np.exp(-1j*np.dot(coords, q))
for i in range(nmoi):
for j in range(nmoj):
mo_pairs_G[:,i*nmoj+j] = tools.fftk(mo_pairs_R[:,i,j], cell.mesh, fac)
mo_pairs_invG[:,i*nmoj+j] = np.conj(tools.fftk(np.conj(mo_pairs_R[:,i,j]), cell.mesh,
fac.conj()))
return mo_pairs_G, mo_pairs_invG
def assemble_eri(cell, orb_pair_invG1, orb_pair_G2, q=None):
'''Assemble 4-index electron repulsion integrals.
Returns:
(nmo1*nmo2, nmo3*nmo4) ndarray
'''
if q is None:
q = np.zeros(3)
coulqG = tools.get_coulG(cell, -1.0*q)
ngrids = orb_pair_invG1.shape[0]
Jorb_pair_G2 = np.einsum('g,gn->gn',coulqG,orb_pair_G2)*(cell.vol/ngrids**2)
eri = np.dot(orb_pair_invG1.T, Jorb_pair_G2)
return eri
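# Shape sketch for assemble_eri (illustrative only): with ngrids G-vectors and npair12/npair34
# orbital pairs, the contraction is (npair12, ngrids) x (ngrids, npair34) after scaling each G
# component by the Coulomb kernel v(G) and the vol/ngrids**2 quadrature factor. All inputs below
# are random stand-ins, not physical quantities.
def _example_assemble_eri_shapes(ngrids=64, npair12=4, npair34=9, vol=100.0):
    rng = np.random.RandomState(0)
    invG1 = rng.random_sample((ngrids, npair12)) + 0j  # stand-in for (ij|G)
    G2 = rng.random_sample((ngrids, npair34)) + 0j     # stand-in for (G|kl)
    coulG = rng.random_sample(ngrids)                  # stand-in for the Coulomb kernel v(G)
    eri = np.dot(invG1.T, coulG[:,None]*G2) * (vol/ngrids**2)  # same contraction as assemble_eri
    return eri.shape  # (npair12, npair34)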
def get_ao_pairs_G(cell, kpt=np.zeros(3)):
'''Calculate forward (G|ij) and "inverse" (ij|G) FFT of all AO pairs.
Args:
cell : instance of :class:`Cell`
Returns:
ao_pairs_G, ao_pairs_invG : (ngrids, nao*(nao+1)/2) ndarray
The FFTs of the real-space AO pairs.
'''
coords = gen_uniform_grids(cell)
aoR = eval_ao(cell, coords, kpt) # shape = (coords, nao)
ngrids, nao = aoR.shape
gamma_point = abs(kpt).sum() < 1e-9
if gamma_point:
npair = nao*(nao+1)//2
ao_pairs_G = np.empty([ngrids, npair], np.complex128)
ij = 0
for i in range(nao):
for j in range(i+1):
ao_ij_R = np.conj(aoR[:,i]) * aoR[:,j]
ao_pairs_G[:,ij] = tools.fft(ao_ij_R, cell.mesh)
#ao_pairs_invG[:,ij] = ngrids*tools.ifft(ao_ij_R, cell.mesh)
ij += 1
ao_pairs_invG = ao_pairs_G.conj()
else:
ao_pairs_G = np.zeros([ngrids, nao,nao], np.complex128)
for i in range(nao):
for j in range(nao):
ao_ij_R = np.conj(aoR[:,i]) * aoR[:,j]
ao_pairs_G[:,i,j] = tools.fft(ao_ij_R, cell.mesh)
ao_pairs_invG = ao_pairs_G.transpose(0,2,1).conj().reshape(-1,nao**2)
ao_pairs_G = ao_pairs_G.reshape(-1,nao**2)
return ao_pairs_G, ao_pairs_invG
def get_ao_eri(cell, kpt=np.zeros(3)):
'''Convenience function to return AO 2-el integrals.'''
ao_pairs_G, ao_pairs_invG = get_ao_pairs_G(cell, kpt)
eri = assemble_eri(cell, ao_pairs_invG, ao_pairs_G)
if abs(kpt).sum() < 1e-9:
eri = eri.real
return eri
##################################################
#
# ao2mo/eris.py end
#
##################################################
cell = pgto.Cell()
cell.atom = 'He 1. .5 .5; C .1 1.3 2.1'
cell.basis = {'He': [(0, (2.5, 1)), (0, (1., 1))],
'C' :'gth-szv',}
cell.pseudo = {'C':'gth-pade'}
cell.a = np.eye(3) * 2.5
cell.mesh = [21] * 3
cell.build()
np.random.seed(1)
kpts = np.random.random((4,3))
kpts[3] = kpts[0]-kpts[1]+kpts[2]
kpt0 = np.zeros(3)
cell1 = pgto.Cell()
cell1.atom = 'He 1. .5 .5; He .1 1.3 2.1'
cell1.basis = {'He': [(0, (2.5, 1)), (0, (1., 1))]}
cell1.a = np.eye(3) * 2.5
cell1.mesh = [21] * 3
cell1.build()
kdf0 = mdf.MDF(cell1)
kdf0.auxbasis = 'weigend'
kdf0.mesh = [21] * 3
kdf0.kpts = kpts
def tearDownModule():
global cell, cell1, kdf0
del cell, cell1, kdf0
class KnownValues(unittest.TestCase):
def test_get_pp_loc_part1_high_cost(self):
df = aft.AFTDF(cell)
v1 = aft.get_pp_loc_part1(df, kpts[0])
self.assertAlmostEqual(lib.fp(v1), (-6.0893491060887159+0.19823828749533859j), 8)
def test_aft_get_nuc(self):
df = aft.AFTDF(cell)
v1 = df.get_nuc(kpts[0])
self.assertAlmostEqual(lib.fp(v1), (-5.764786312608102+0.19126292955145852j), 8)
def test_aft_get_pp(self):
v0 = pgto.pseudo.get_pp(cell, kpts[0])
v1 = aft.AFTDF(cell).get_pp(kpts)
self.assertTrue(np.allclose(v0, v1[0], atol=1e-5, rtol=1e-5))
self.assertAlmostEqual(lib.fp(v1[0]), (-5.6240305085898807+0.22094834207603817j), 8)
v0 = pgto.pseudo.get_pp(cell, kpts[1])
self.assertTrue(np.allclose(v0, v1[1], atol=1e-5, rtol=1e-5))
self.assertAlmostEqual(lib.fp(v1[1]), (-5.53877585793+1.043933371359j) ,8)
self.assertAlmostEqual(lib.fp(v1[2]), (-6.05309558678+0.281728966073j), 8)
self.assertAlmostEqual(lib.fp(v1[3]), (-5.60115995450+0.275973062529j), 8)
def test_aft_get_ao_eri(self):
df0 = fft.FFTDF(cell1)
df = aft.AFTDF(cell1)
eri0 = df0.get_ao_eri(compact=True)
eri1 = df.get_ao_eri(compact=True)
self.assertAlmostEqual(abs(eri0-eri1).max(), 0, 9)
eri0 = df0.get_ao_eri(kpts[0])
eri1 = df.get_ao_eri(kpts[0])
self.assertAlmostEqual(abs(eri0-eri1).max(), 0, 9)
eri0 = df0.get_ao_eri(kpts)
eri1 = df.get_ao_eri(kpts)
self.assertAlmostEqual(abs(eri0-eri1).max(), 0, 9)
def test_aft_get_ao_eri_high_cost(self):
df0 = fft.FFTDF(cell)
df = aft.AFTDF(cell)
eri0 = df0.get_ao_eri(compact=True)
eri1 = df.get_ao_eri(compact=True)
self.assertTrue(np.allclose(eri0, eri1, atol=1e-5, rtol=1e-5))
self.assertAlmostEqual(lib.fp(eri1), 0.80425361966560172, 8)
eri0 = df0.get_ao_eri(kpts[0])
eri1 = df.get_ao_eri(kpts[0])
self.assertTrue(np.allclose(eri0, eri1, atol=1e-5, rtol=1e-5))
self.assertAlmostEqual(lib.fp(eri1), (2.9346374476387949-0.20479054936779137j), 8)
eri0 = df0.get_ao_eri(kpts)
eri1 = df.get_ao_eri(kpts)
self.assertTrue(np.allclose(eri0, eri1, atol=1e-5, rtol=1e-5))
self.assertAlmostEqual(lib.fp(eri1), (0.33709287302019619-0.94185725020966538j), 8)
def test_get_eri_gamma(self):
odf0 = mdf.MDF(cell1)
odf = aft.AFTDF(cell1)
ref = odf0.get_eri()
eri0000 = odf.get_eri(compact=True)
self.assertTrue(eri0000.dtype == numpy.double)
self.assertTrue(np.allclose(eri0000, ref, atol=1e-6, rtol=1e-6))
self.assertAlmostEqual(lib.fp(eri0000), 0.23714016293926865, 9)
    def test_get_eri_gamma_1111(self): # renamed: a second test_get_eri_gamma would silently shadow the definition above
odf = aft.AFTDF(cell1)
ref = kdf0.get_eri((kpts[0],kpts[0],kpts[0],kpts[0]))
eri1111 = odf.get_eri((kpts[0],kpts[0],kpts[0],kpts[0]))
self.assertTrue(np.allclose(eri1111, ref, atol=1e-6, rtol=1e-6))
self.assertAlmostEqual(lib.fp(eri1111), (1.2410388899583582-5.2370501878355006e-06j), 9)
eri1111 = odf.get_eri((kpts[0]+1e-8,kpts[0]+1e-8,kpts[0],kpts[0]))
self.assertTrue(np.allclose(eri1111, ref, atol=1e-6, rtol=1e-6))
self.assertAlmostEqual(lib.fp(eri1111), (1.2410388899583582-5.2370501878355006e-06j), 9)
def test_get_eri_0011(self):
odf = aft.AFTDF(cell1)
ref = kdf0.get_eri((kpts[0],kpts[0],kpts[1],kpts[1]))
eri0011 = odf.get_eri((kpts[0],kpts[0],kpts[1],kpts[1]))
self.assertTrue(np.allclose(eri0011, ref, atol=1e-3, rtol=1e-3))
self.assertAlmostEqual(lib.fp(eri0011), (1.2410162858084512+0.00074485383749912936j), 9)
ref = fft.FFTDF(cell1).get_mo_eri([numpy.eye(cell1.nao_nr())]*4, (kpts[0],kpts[0],kpts[1],kpts[1]))
eri0011 = odf.get_eri((kpts[0],kpts[0],kpts[1],kpts[1]))
self.assertTrue(np.allclose(eri0011, ref, atol=1e-9, rtol=1e-9))
self.assertAlmostEqual(lib.fp(eri0011), (1.2410162860852818+0.00074485383748954838j), 9)
def test_get_eri_0110(self):
odf = aft.AFTDF(cell1)
ref = kdf0.get_eri((kpts[0],kpts[1],kpts[1],kpts[0]))
eri0110 = odf.get_eri((kpts[0],kpts[1],kpts[1],kpts[0]))
self.assertTrue(np.allclose(eri0110, ref, atol=1e-6, rtol=1e-6))
eri0110 = odf.get_eri((kpts[0]+1e-8,kpts[1]+1e-8,kpts[1],kpts[0]))
self.assertTrue(np.allclose(eri0110, ref, atol=1e-6, rtol=1e-6))
self.assertAlmostEqual(lib.fp(eri0110), (1.2928399254827956-0.011820590601969154j), 9)
ref = fft.FFTDF(cell1).get_mo_eri([numpy.eye(cell1.nao_nr())]*4, (kpts[0],kpts[1],kpts[1],kpts[0]))
eri0110 = odf.get_eri((kpts[0],kpts[1],kpts[1],kpts[0]))
self.assertTrue(np.allclose(eri0110, ref, atol=1e-9, rtol=1e-9))
self.assertAlmostEqual(lib.fp(eri0110), (1.2928399254827956-0.011820590601969154j), 9)
eri0110 = odf.get_eri((kpts[0]+1e-8,kpts[1]+1e-8,kpts[1],kpts[0]))
self.assertTrue(np.allclose(eri0110, ref, atol=1e-9, rtol=1e-9))
self.assertAlmostEqual(lib.fp(eri0110), (1.2928399254827956-0.011820590601969154j), 9)
def test_get_eri_0123(self):
odf = aft.AFTDF(cell1)
ref = kdf0.get_eri(kpts)
eri1111 = odf.get_eri(kpts)
self.assertAlmostEqual(abs(eri1111-ref).max(), 0, 9)
self.assertAlmostEqual(lib.fp(eri1111), (1.2917759427391706-0.013340252488069412j), 9)
ref = fft.FFTDF(cell1).get_mo_eri([numpy.eye(cell1.nao_nr())]*4, kpts)
self.assertAlmostEqual(abs(eri1111-ref).max(), 0, 9)
def test_get_mo_eri(self):
df0 = fft.FFTDF(cell1)
odf = aft.AFTDF(cell1)
nao = cell1.nao_nr()
numpy.random.seed(5)
mo =(numpy.random.random((nao,nao)) +
numpy.random.random((nao,nao))*1j)
eri_mo0 = df0.get_mo_eri((mo,)*4, kpts)
eri_mo1 = odf.get_mo_eri((mo,)*4, kpts)
self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))
kpts_t = (kpts[2],kpts[3],kpts[0],kpts[1])
eri_mo2 = df0.get_mo_eri((mo,)*4, kpts_t)
eri_mo2 = eri_mo2.reshape((nao,)*4).transpose(2,3,0,1).reshape(nao**2,-1)
self.assertTrue(np.allclose(eri_mo2, eri_mo0, atol=1e-7, rtol=1e-7))
eri_mo0 = df0.get_mo_eri((mo,)*4, (kpts[0],)*4)
eri_mo1 = odf.get_mo_eri((mo,)*4, (kpts[0],)*4)
self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))
eri_mo0 = df0.get_mo_eri((mo,)*4, (kpts[0],kpts[1],kpts[1],kpts[0],))
eri_mo1 = odf.get_mo_eri((mo,)*4, (kpts[0],kpts[1],kpts[1],kpts[0],))
self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))
eri_mo0 = df0.get_mo_eri((mo,)*4, (kpt0,kpt0,kpts[0],kpts[0],))
eri_mo1 = odf.get_mo_eri((mo,)*4, (kpt0,kpt0,kpts[0],kpts[0],))
self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))
eri_mo0 = df0.get_mo_eri((mo,)*4, (kpts[0],kpts[0],kpt0,kpt0,))
eri_mo1 = odf.get_mo_eri((mo,)*4, (kpts[0],kpts[0],kpt0,kpt0,))
self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))
mo1 = mo[:,:nao//2+1]
eri_mo0 = df0.get_mo_eri((mo1,mo,mo,mo1), (kpts[0],)*4)
eri_mo1 = odf.get_mo_eri((mo1,mo,mo,mo1), (kpts[0],)*4)
self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))
eri_mo0 = df0.get_mo_eri((mo1,mo,mo1,mo), (kpts[0],kpts[1],kpts[1],kpts[0],))
eri_mo1 = odf.get_mo_eri((mo1,mo,mo1,mo), (kpts[0],kpts[1],kpts[1],kpts[0],))
self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))
def test_init_aft_1d(self):
cell = pgto.Cell()
cell.atom = 'He 1. .5 .5; He .1 1.3 2.1'
cell.basis = {'He': [(0, (2.5, 1)), (0, (1., 1))]}
cell.a = np.eye(3) * 2.5
cell.dimension = 1
cell.mesh = [3, 3, 3]
cell.build()
f = aft.AFTDF(cell)
np.random.seed(1)
f.kpts = np.random.random((4,3))
f.check_sanity()
if __name__ == '__main__':
print("Full Tests for aft")
unittest.main()
|
sunqm/pyscf
|
pyscf/pbc/df/test/test_aft.py
|
Python
|
apache-2.0
| 18,913
|
[
"PySCF"
] |
03cac9607f3780898ffef44f495a627f05e483026237ecb1d9468a3e7d706d14
|
# Version: 0.18
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.python.org/pypi/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere to your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601 format. This will be None if the date is not
available.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See [details.md](details.md) in the Versioneer
source tree for descriptions.
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Known Limitations
Some situations are known to cause problems for Versioneer. This details the
most significant ones. More can be found on Github
[issues page](https://github.com/warner/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
### Unicode version strings
While Versioneer works (and is continually tested) with both Python 2 and
Python 3, it is not entirely consistent with bytes-vs-unicode distinctions.
Newer releases probably generate unicode version strings on py2. It's not
clear that this is wrong, but it may be surprising for applications when then
write these strings to a network connection or include them in bytes-oriented
APIs like cryptographic checksums.
[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates
this question.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
"""
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = ("Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND').")
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print("Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py))
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.SafeConfigParser()
with open(setup_cfg, "r") as f:
parser.readfp(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory.
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyway.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-subst keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory.
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
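# Illustrative sketch (assumed values): with parentdir_prefix = "myproject-",
# an unpacked tarball directory named "myproject-1.2.0" would yield
# {"version": "1.2.0", "full-revisionid": None, "dirty": False, ...}.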
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.18) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True,
indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyway.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, \
"please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None, "error": "unable to compute version",
"date": None}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass():
"""Get the custom setuptools/distutils subclasses used by Versioneer."""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
# sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/warner/python-versioneer/issues/52
cmds = {}
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if 'py2exe' in sys.modules: # py2exe enabled?
try:
from py2exe.distutils_buildexe import py2exe as _py2exe # py3
except ImportError:
from py2exe.build_exe import py2exe as _py2exe # py2
class cmd_py2exe(_py2exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["py2exe"] = cmd_py2exe
# we override different "sdist" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile,
self._versioneer_generated_versions)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
"""Main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (EnvironmentError, configparser.NoSectionError,
configparser.NoOptionError) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg",
file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
"__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
cfg.versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
| alchemistry/alchemlyb | versioneer.py | Python | bsd-3-clause | 68,573 | ["Brian"] | 797d2ae0c8391ce801fb2363238d3cef45090af9fd2e6202325921023c27cd8d |
from struct import pack, unpack
import SimpleHTTPServer
import SocketServer
import sys
import json
import importlib
PORT = 8052
class MyRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def handleGameStringApi(self):
global backend
self.send_response(200)
self.send_header('Content-type','text/html')
self.send_header('Access-Control-Allow-Origin', 'http://44670.org')
self.end_headers()
ret = backend.readGameStringForHttpApi()
if (ret is None):
ret = {'status': 'failed'}
else:
lastSuccessReadForHttpApi = ret
self.wfile.write(json.dumps(ret))
return
#def log_message(self, format, *args):
#return
def do_GET(self):
if self.path.startswith('/str'):
self.handleGameStringApi()
return
if self.path == '/':
self.path = 'index.html'
return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
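# Illustrative client sketch (assumes the server runs on localhost:8052 and
# uses the '/str' route handled above):
#   import urllib2, json
#   data = json.loads(urllib2.urlopen('http://localhost:8052/str').read())
#   # e.g. {'status': 'failed'} when no game string could be read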
if len(sys.argv) < 2:
print("""
Usage:
python server.py backend_name
Example:
python server.py psp.lxzs1
""")
exit(0)
backend = importlib.import_module('backends.' + sys.argv[1])
Handler = MyRequestHandler
httpd = SocketServer.TCPServer(("", PORT), Handler)
print "Text extraction service started at port: ", PORT
print "Visit http://44670.org/fanyi for translations!"
httpd.serve_forever()
| 44670/buyonghenmafanhenleijiunengfanyiyouxi | server.py | Python | gpl-3.0 | 1,447 | ["VisIt"] | 79f8c76ab38f54dd430ae9b3afd67b8d57600145dac488b25780487da8cc849b |
# -*- coding: utf-8 -*-
#
# pymicro documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 22 11:17:23 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import mock
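# Heavyweight or compiled dependencies are mocked out below so that Sphinx
# autodoc can import pymicro on build machines (e.g. ReadTheDocs) where those
# packages are not installed.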
MOCK_MODULES = ['numpy', 'scipy', 'scipy.special', 'matplotlib',
'matplotlib.pyplot', 'matplotlib.image', 'matplotlib.cm',
'matplotlib.colors', 'matplotlib.figure',
'matplotlib.backend_bases',
'matplotlib.backends.backend_qt4agg', 'scipy.ndimage', 'wx',
'wx.Panel', 'vtk', 'vtk.util', 'vtk.util.colors', 'skimage',
'skimage.transform', '_tifffile', 'h5py', 'tables',
'lxml', 'lxml.builder', 'BasicTools',
'BasicTools.Containers',
'BasicTools.Containers.ConstantRectilinearMesh',
'BasicTools.Containers.UnstructuredMesh',
'BasicTools.Containers.UnstructuredMeshCreationTools',
'BasicTools.Containers.MeshBase','BasicTools.IO',
'BasicTools.IO.XdmfTools', 'PyQt5', 'PyQt5.QtWidgets',
'PyQt5.QtCore']
# , 'ImPanel', 'PlotPanel']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.append(os.path.abspath('sphinxext'))
print(sys.path)
print('content of .')
print(os.listdir('.'))
print('content of ..')
print(os.listdir('..'))
print('content of ../..')
print(os.listdir('../..'))
try:
import pymicro
print('** successfully imported pymicro')
from pymicro.crystal.lattice import Lattice
print('** successfully imported Lattice')
except Exception:
print('problem during import')
try:
import gen_rst
print('** successfully imported gen_rst')
except Exception:
pass
# -- General configuration -----------------------------------------------------
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.1'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.mathjax',
'sphinxcontrib.bibtex',
'gen_rst',
'nbsphinx']
bibtex_bibfiles = ['bibliography.bib']
mathjax_path = 'https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-AMS-MML_HTMLorMML'
autodoc_member_order = 'bysource'
autodoc_default_flags = ['members', 'special-members', 'undoc-members', 'show-inheritance']
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pymicro'
copyright = u'2013-2020, Henry Proudhon'
# The project version is extracted from the main __init__.py
def extract_version():
"""
Extract the version string from the main __init__.py and
return it.
"""
with open('../pymicro/__init__.py') as fd:
for line in fd.readlines():
if (line.startswith('__version__')):
exec (line.strip())
return locals()["__version__"]
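# An exec-free alternative (an illustrative sketch, not what this file does):
#   import re
#   with open('../pymicro/__init__.py') as fd:
#       version = re.search(r"__version__\s*=\s*['\"]([^'\"]+)['\"]",
#                           fd.read()).group(1)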
version = '%s' % extract_version()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '**.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages.
print(os.environ)
if 'READTHEDOCS' not in os.environ: # if building locally
html_theme = 'sphinx_rtd_theme'
html_theme_path = ["../../sphinx_rtd_theme"] # the theme should be installed aside the pymicro folder
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {}
html_theme_options['canonical_url'] = 'http://pymicro.readthedocs.io/en/latest/'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pymicrodoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pymicro.tex', u'pymicro Documentation',
u'Henry Proudhon', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pymicro', u'pymicro Documentation',
[u'Henry Proudhon'], 1)
]
| heprom/pymicro | docs/conf.py | Python | mit | 9,419 | ["CRYSTAL", "VTK"] | 07994fdebd0ff6928498e6678887937eb8972ff432c5f38923896b0e1b08b034 |
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hyperparameters and ranges common to multiple models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import zip # pylint: disable=redefined-builtin
from tensor2tensor.utils import registry
import tensorflow as tf
@registry.register_hparams("basic_1")
def basic_params1():
"""A set of basic hyperparameters."""
return tf.contrib.training.HParams(
# If the problem consists of variable-length sequences
# (see problem.batch_size_means_tokens()), then this is the number
# of tokens per batch per GPU or per TPU core. Otherwise, this is
# the number of examples per GPU or per TPU core.
batch_size=4096,
batch_shuffle_size=512,
# If True, then if the features are of variable length, the batch_size is
# used as the actual batch size (and not tokens per batch).
use_fixed_batch_size=False,
num_hidden_layers=4,
kernel_height=3,
kernel_width=1,
hidden_size=64,
compress_steps=0,
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
dropout=0.2,
clip_grad_norm=2.0,
grad_noise_scale=0.0,
summarize_grads=False,
# Flag for whether mlperf mode is on
mlperf_mode=False,
# Whether to log the name and size of every variable
summarize_vars=False,
initializer="orthogonal",
initializer_gain=1.5,
label_smoothing=0.1,
optimizer="Adam",
optimizer_adam_epsilon=1e-6,
optimizer_adam_beta1=0.85,
optimizer_adam_beta2=0.997,
optimizer_momentum_momentum=0.9,
optimizer_momentum_nesterov=False,
optimizer_adafactor_beta1=0.0,
optimizer_adafactor_beta2=0.999,
optimizer_adafactor_factored=True,
optimizer_adafactor_decay_type="pow",
optimizer_adafactor_memory_exponent=0.8,
optimizer_adafactor_clipping_threshold=1.0,
optimizer_adafactor_multiply_by_parameter_scale=True,
# Number of accumulating steps for multi step optimizers.
optimizer_multistep_accumulate_steps=None,
weight_decay=1e-6,
weight_noise=0.0,
# Defines the learning rate as a product of named functions.
# Available functions are listed in learning_rate._LEARNING_RATE_FUNCTIONS
# e.g. "constant*linear_warmup*rsqrt_decay*rsqrt_hidden_size"
learning_rate_schedule="legacy",
learning_rate_constant=1.0,
# If learning_rate_schedule=="legacy",
# then we specify decay scheme here. Warmup is always exponential,
# except with "noam" learning rate decay scheme.
# see optimize.legacy_learning_rate_schedule()
# TODO(noam): migrate everyone away from this.
learning_rate_decay_scheme="none",
# decay_steps and decay_staircase for learning_rate_decay_scheme=="exp"
learning_rate_decay_steps=5000,
learning_rate_decay_staircase=False,
learning_rate_minimum=None,
learning_rate_decay_rate=1.0,
learning_rate_warmup_steps=100,
learning_rate_cosine_cycle_steps=250000,
learning_rate=0.1,
sampling_method="argmax", # "argmax" or "random"
sampling_temp=1.0, # temperature for sampling
# expand the logits a piece at a time - saves memory.
factored_logits=False,
multiply_embedding_mode="sqrt_depth",
# Parameters related to mixtures of experts.
moe_hidden_sizes="2048", # hidden layer sizes (comma-separated)
moe_num_experts=64, # number of experts per layer
moe_k=2, # how many experts to use for each batch element
moe_loss_coef=1e-2,
# Sequences of operations to perform on layer input and layer output.
# Used by common_layers.layer_preprocess, common_layers.layer_postprocess
# Each character represents an operation:
# none: no preprocessing
# d: apply dropout
# n: apply normalization (see norm_type and norm_epsilon)
# a: add layer input (residual connection - only during postprocess)
# The special string "none" is used instead of the empty string
# to indicate no pre/postprocessing, since the empty string causes
# trouble for hyperparameter tuning.
# TODO(noam): The current settings ("", "dan") are the published version
# of the transformer. ("n", "da") seems better for harder-to-learn
# models, so it should probably be the default.
layer_preprocess_sequence="none",
layer_postprocess_sequence="dan",
# dropout rate to use during layer_preprocess and layer_postprocess
layer_prepostprocess_dropout=0.1,
# broadcast dimensions for layer_prepostprocess_dropout
# a comma-separated list of integers.
# see common_layers.dropout_with_broadcast_dims()
# Change this to "1" to save memory.
layer_prepostprocess_dropout_broadcast_dims="",
# dropout some symbols (set them to 0) before embedding.
symbol_dropout=0.0,
# What type of normalization to use
norm_type="layer", # "batch", layer", "noam", "none".
# epsilon parameter to normalization function
norm_epsilon=1e-6,
symbol_modality_num_shards=1,
# pad vocabularies so that this value divides the vocabulary size.
vocab_divisor=1,
# During training, we drop sequences whose inputs and targets are shorter
# than min_length
min_length=0,
# During training, we drop sequences whose inputs or targets are longer
# than max_length.
# If max_length==0, we use hparams.batch_size instead.
max_length=0,
# Maximum length in the smallest length bucket. Setting this
# flag too high will result in wasteful padding of short
# sequences. Due to some (hopefully) temporary hacks in the
# data reading and batching code, setting this flag too low
# results in a very long batch-shuffling queue.
# TODO(noam): change this once the Datasets API changes.
min_length_bucket=8,
# This flag controls the number of length buckets in the data
# reader. The buckets have maximum lengths from
# min_bucket_length to (max_length or batch_size), increasing
# (approximately) by factors of length_bucket_step.
length_bucket_step=1.1,
# If set to True, drop sequences longer than max_length during eval.
# This affects the validity of the evaluation metrics.
eval_drop_long_sequences=False,
# If True, run the model autoregressively instead of teacher-forcing
# during eval
eval_run_autoregressive=False,
# TODO(lukaszkaiser): these parameters should probably be set elsewhere.
# (SymbolModality) - If this flag is on, we try to share all of the input
# embeddings, the target embeddings and the softmax weights.
shared_embedding_and_softmax_weights=False,
# (SymbolModality) - If this flag is on, we try to share the input
# embeddings and the target embeddings.
# You can also share the input embeddings with the target embeddings
# by using a problem_hparams that uses the same modality object for
# the input modality and target modality.
shared_embedding=False,
# In SymbolModality, skip the top layer, assume we're providing logits.
symbol_modality_skip_top=False,
# Modalities used to map from features to a space compatible with
# chosen model architecture. It comprises key-value pairs of a feature
# name (str) and its modality class.
modality={},
# The maximum length of "input" sequence.
# Sequences longer than this value will be truncated. 0 or negative values
# mean there is no maximum or truncation.
# You can change this behavior by overriding preprocess_example() method
# in your problem class.
max_input_seq_length=0,
# The maximum length of "target" sequence.
# Sequences longer than this value will be truncated. 0 or negative values
# mean there is no maximum or truncation.
# You can change this behavior by overriding preprocess_example() method
# in your problem class.
max_target_seq_length=0,
      # If nonzero, we split the target sequences on example read.
      # This is for use with language modeling problems with fixed-length
      # examples, e.g. the examples may be written with length 65536, but we
      # want to split each example into 64 examples of length 1024.
split_to_length=0,
# Video settings: how many frames to batch on input and targets.
video_num_input_frames=1,
video_num_target_frames=1,
# This flag allows us to optionally treat a seq-to-seq problem
# as a language model. Legal values are:
#
# "none" - Do not prepend the inputs to the targets.
# "prepend_inputs_masked_attention"
# replace "targets" in preprocessing with
# tf.concat([inputs, [0], targets], axis=1)
# i.e. we prepend the inputs to the targets with a single
# padding token in between. Use masked self-attention on the
# entire resulting sequence. During training, we compute losses on
# the combined sequence. During eval, we compute the metrics
# on only the targets portion.
# "prepend_inputs_full_attention"
# similar to the previous option except that each
# position in the inputs portion can see the
# entire inputs portion. This removes the challenge of
# autoregressively predicting the inputs portion.
prepend_mode="none",
      # Scheduled sampling is interesting for auto-regressive models.
      # It runs an additional step using the generated output as autoregressive
      # targets, which can improve the model's inference results later on. The
      # parameter scheduled_sampling_prob determines the probability with which
      # this additional step is run. It's turned off (0.0) by default.
      # This probability warms up exponentially over the number of steps
      # given by scheduled_sampling_warmup_steps.
      # The tensor used for the second step consists of outputs from the
      # first step mixed with the gold truth, with the proportion of gold
      # determined by scheduled_sampling_gold_mixin_prob.
scheduled_sampling_prob=0.0,
scheduled_sampling_warmup_steps=50000,
scheduled_sampling_gold_mixin_prob=0.5,
# This setting controls whether to copy variables around in a daisy chain
# (if true) or leave their placement to TensorFlow. It only affects multi
# device training and mostly should be turned on for performance. One
# exception are recurrent models: with dynamic loops it must be off.
daisy_chain_variables=True,
# If True in PREDICT mode, then last-position-only optimizations are not
# used.
force_full_predict=False,
# Set this for pure model parallelism. There is only one data shard.
no_data_parallelism=False,
# dtype used for activations. - "float32" or "bfloat16"
# activation_dtype="bfloat16" currently only works on TPU.
# It lowers activation-memory usage
# and does not appear to affect quality.
# You can train on TPU with activation_dtype="bfloat16" and evaluate
# on CPU/GPU with activation_dtype="float32"
activation_dtype="float32",
# dtype used for parameters: "float32" or "bfloat16"
# bfloat16 currently only works with optimizer="adafactor".
# The savings in memory allow for training larger models.
# Weights are encoded as (w*128)^8, using pseudostochastic
# roundoff. Initial experiments show that model quality is similar
# to baseline for about 3M training steps, but worse thereafter.
weight_dtype="float32",
# Directory containing a checkpoint for a pretrained model. This will only
# be used if a new run is being started. Parameters not found in the
# pretrained model will be randomly initialized. Superfluous parameters in
# the pretrained model will be ignored.
pretrained_model_dir="",
      # Threshold used for two cases: the primary-task probability for the
      # constant mixing schedule, and the exponential-schedule limit for when
      # mixing should stop (e.g. 0.5 means stop at 50-50 mixing; 0.8 means
      # stop at 20-80 mixing for the primary-vs-others case).
multiproblem_schedule_threshold=0.5,
# The number of examples at which the proportion of the mixed in datasets
# is multiproblem_schedule_threshold
multiproblem_schedule_max_examples=1e7,
# When training multiproblems, we can mix the data according to different
# schedules. Example: a constant schedule mixing 20-80 between the primary
# and other tasks.
# A list of supported schedules can be found in
# `data_generators.multi_problem.py`.
multiproblem_mixing_schedule="constant",
# A scalar to upweight the classifier loss in a multiproblem setting.
multiproblem_class_loss_multiplier=0.0,
# A boolean that decides whether input sequence losses and target label
# losses in classification problems should be reweighted.
multiproblem_reweight_label_loss=False,
# How much weight the targets in classification problems receive. Inputs
# receive 1 minus this weight.
multiproblem_label_weight=0.5,
# Hyperparameters for relative attention.
# The maximum relative positional distance to learn an embedding for.
max_relative_position=0,
# If heads share the same relative embedding.
heads_share_relative_embedding=False,
# If relative embedding terms are added to values too.
add_relative_to_values=False,
      # If True, enable the host_call, which is executed every training step.
      # There could be a performance drop if the host_call function is slow and
      # cannot keep up with the TPU-side computation.
tpu_enable_host_call=False,
# Pad batch dim of inputs to nearest multiple of batch multiple.
pad_batch=False,
# When true, do not evaluate on the language model data when running the
# multiproblem since it can take a while. If False, set eval_steps to
# something large like 6000 or 10000.
multiproblem_target_eval_only=False,
# Max out the vocab size to a power of 2 for efficiency and to reserve
# extra space in the vocabulary for new task ids and label classes.
multiproblem_vocab_size=-1,
# When using multiproblem with generation tasks, need to truncate the
# inputs and targets manually before concatenating them.
multiproblem_max_input_length=-1,
multiproblem_max_target_length=-1,
# If positive, makes training targets fixed-length in MultiProblem.
multiproblem_fixed_train_length=-1
)
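# Editor's sketch (not part of the original file): a hedged illustration of
# how a derived hparams set is usually registered on top of basic_params1().
# The name "basic_1_small" and the overridden values are hypothetical.
@registry.register_hparams("basic_1_small")
def basic_params1_small():
  """basic_1 with a smaller network; for illustration only."""
  hparams = basic_params1()
  hparams.hidden_size = 32
  hparams.num_hidden_layers = 2
  return hparams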
class RangedHParams(object):
"""Defines parameter ranges for tuning."""
# From ParameterConfig proto
LINEAR_SCALE = 1
LOG_SCALE = 2
REVERSE_LOG_SCALE = 3
SCALES_STR = {
LINEAR_SCALE: "UNIT_LINEAR_SCALE",
LOG_SCALE: "UNIT_LOG_SCALE",
REVERSE_LOG_SCALE: "UNIT_REVERSE_LOG_SCALE",
}
def __init__(self):
self._categorical_params = {}
self._discrete_params = {}
self._float_params = {}
self._int_params = {}
def _check_reset_and_type_change(self, name, orig_ctr):
"""Check if name is in orig_ctr or in one of the other type containers."""
# Resetting a hyperparameter
if name in orig_ctr:
tf.logging.warning("Overwriting hparam %s", name)
ctr_names = [
(self._categorical_params, "categorical"),
(self._discrete_params, "discrete"),
(self._float_params, "float"),
(self._int_params, "int"),
]
ctrs, names = list(zip(*ctr_names))
orig_name = names[ctrs.index(orig_ctr)]
for ctr, ctr_name in ctr_names:
if ctr is orig_ctr:
continue
# Using a different type for the same hyperparameter name
if name in ctr:
raise ValueError("Setting hyperparameter %s as type %s, but a "
"hyperparemeter of the same name was originally "
"registered as type %s" % (name, ctr_name, orig_name))
def set_categorical(self, name, categories, length=None):
self._check_reset_and_type_change(name, self._categorical_params)
self._categorical_params[name] = (name, categories, length)
def set_discrete(self, name, feasible_points, scale=None, length=None):
self._check_reset_and_type_change(name, self._discrete_params)
self._discrete_params[name] = (name, feasible_points, scale, length)
def set_float(self, name, min_val, max_val, scale=None, length=None):
self._check_reset_and_type_change(name, self._float_params)
self._float_params[name] = (name, min_val, max_val, scale, length)
def set_int(self, name, min_val, max_val, scale=None, length=None):
self._check_reset_and_type_change(name, self._int_params)
self._int_params[name] = (name, min_val, max_val, scale, length)
def fix_select_params(self, hp):
ctrs = [
self._categorical_params, self._discrete_params, self._float_params,
self._int_params
]
    for key, val in hp.values().items():
for ctr in ctrs:
if key in ctr:
del ctr[key]
self.set_discrete(key, [val])
def to_parameter_specs(self, name_prefix=""):
"""To list of dicts suitable for Cloud ML Engine hyperparameter tuning."""
specs = []
for name, categories, _ in self._categorical_params.values():
spec = {
"parameterName": name_prefix + name,
"type": "CATEGORICAL",
"categoricalValues": categories,
}
specs.append(spec)
for name, feasible_points, scale, _ in self._discrete_params.values():
spec = {
"parameterName": name_prefix + name,
"type": "DISCRETE",
"discreteValues": feasible_points,
}
if scale:
spec["scaleType"] = self.SCALES_STR[scale]
specs.append(spec)
for name, min_val, max_val, scale, _ in self._float_params.values():
spec = {
"parameterName": name_prefix + name,
"type": "DOUBLE",
"minValue": min_val,
"maxValue": max_val,
}
if scale:
spec["scaleType"] = self.SCALES_STR[scale]
specs.append(spec)
for name, min_val, max_val, scale, _ in self._int_params.values():
spec = {
"parameterName": name_prefix + name,
"type": "INTEGER",
"minValue": min_val,
"maxValue": max_val,
}
if scale:
spec["scaleType"] = self.SCALES_STR[scale]
specs.append(spec)
return specs
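# Editor's sketch (not part of the original file): exercising RangedHParams
# directly. Each set_* call records one search dimension; to_parameter_specs()
# emits Cloud ML Engine-style dicts. The "hp_" prefix is a hypothetical choice.
def _example_ranged_hparams_specs():
  """Build a two-dimensional search space and export tuning specs."""
  rhp = RangedHParams()
  rhp.set_float("learning_rate", 0.01, 1.0, scale=RangedHParams.LOG_SCALE)
  rhp.set_categorical("optimizer", ["Adam", "SGD"])
  # Yields dicts such as:
  # {"parameterName": "hp_learning_rate", "type": "DOUBLE",
  #  "minValue": 0.01, "maxValue": 1.0, "scaleType": "UNIT_LOG_SCALE"}
  return rhp.to_parameter_specs(name_prefix="hp_")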
@registry.register_ranged_hparams("basic1")
def basic_range1(ranged_hparams):
"""A basic range of hyperparameters."""
rhp = ranged_hparams
rhp.set_discrete("batch_size", [1024, 2048, 4096])
rhp.set_discrete("num_hidden_layers", [1, 2, 3, 4, 5, 6])
rhp.set_discrete("hidden_size", [32, 64, 128, 256, 512], scale=rhp.LOG_SCALE)
rhp.set_discrete("kernel_height", [1, 3, 5, 7])
rhp.set_discrete("kernel_width", [1, 3, 5, 7])
rhp.set_discrete("compress_steps", [0, 1, 2])
rhp.set_float("dropout", 0.0, 0.5)
rhp.set_float("weight_decay", 1e-4, 10.0, scale=rhp.LOG_SCALE)
rhp.set_float("label_smoothing", 0.0, 0.2)
rhp.set_float("clip_grad_norm", 0.01, 50.0, scale=rhp.LOG_SCALE)
rhp.set_float("learning_rate", 0.005, 2.0, scale=rhp.LOG_SCALE)
rhp.set_categorical("initializer",
["uniform", "orthogonal", "uniform_unit_scaling"])
rhp.set_float("initializer_gain", 0.5, 3.5)
rhp.set_categorical("learning_rate_decay_scheme",
["none", "sqrt", "noam", "exp"])
rhp.set_float("optimizer_adam_epsilon", 1e-7, 1e-2, scale=rhp.LOG_SCALE)
rhp.set_float("optimizer_adam_beta1", 0.8, 0.9)
rhp.set_float("optimizer_adam_beta2", 0.995, 0.999)
rhp.set_categorical(
"optimizer",
["Adam", "Adagrad", "Momentum", "RMSProp", "SGD", "YellowFin"])
@registry.register_ranged_hparams
def basic_moe_range(rhp):
"""Moe range; when this parameter is unused, it allows us to see variance."""
rhp.set_float("moe_loss_coef", 0.01, 0.02)
|
mlperf/training_results_v0.5
|
v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/t2t/tensor2tensor/layers/common_hparams.py
|
Python
|
apache-2.0
| 20,898
|
[
"MOE"
] |
5ebb5bba774a14b165c11e776590c6b507f9fcca5a0b2d112e1042aa4e32a9bc
|
# -*- coding: utf-8 -*-
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
A Brianer
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Specials.Simulaters.Runner"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer"
SYS.setSubModule(globals())
#</DefineAugmentation>
#<ImportSpecificModules>
from ShareYourSystem.Standards.Noders import Noder
from ShareYourSystem.Specials.Simulaters import Populater
import operator
#</ImportSpecificModules>
#<DefineClass>
@DecorationClass(**{
'ClassingSwitchMethodStrsList':['brian']
})
class BrianerClass(BaseClass):
#Definition
RepresentingKeyStrsList=[
'BrianingTimeDimensionVariable',
'BrianingPrintRunIsBool',
'BrianedNetworkVariable',
'BrianedClocksList',
'BrianedSimulationClock',
'BrianedNeuronGroupsList',
'BrianedStateMonitorsList',
'BrianedSpikeMonitorsList',
'BrianedConnectionsList',
]
def default_init(self,
_BrianingTimeDimensionVariable=None,
_BrianingPrintRunIsBool=True,
_BrianedNetworkVariable=None,
_BrianedClocksList=None,
_BrianedSimulationClock=None,
_BrianedNeuronGroupsList=None,
_BrianedStateMonitorsList=None,
_BrianedSpikeMonitorsList=None,
_BrianedConnectionsList=None,
**_KwargVariablesDict
):
#Call the parent __init__ method
BaseClass.__init__(self,**_KwargVariablesDict)
def mimic_run(self):
#brian first
self.brian()
#parent method
BaseClass.run(self)
#debug
self.debug('We start running in brian')
#run with the brian method
self.BrianedNetworkVariable.run(
self.RunningTimeFloat*self.BrianingTimeDimensionVariable
)
#debug
self.debug('We stop running in brian')
def do_brian(self):
#network first
self.network(
**{
'RecruitingConcludeConditionVariable':[
(
'__class__.__mro__',
operator.contains,Populater.PopulaterClass
)
]
}
)
"""
#populate
map(
lambda __NetworkedDeriveConnecter:
__NetworkedDeriveConnecter.populate(),
self.NetworkedDeriveConnectersList
)
"""
#set the different times
self.BrianedStepTimeFloatsList=list(
set(
SYS.flat(
map(
lambda __BrianingDerivePopulater:
SYS.unzip(
__BrianingDerivePopulater.MoniteringStateTuplesList,
[2]
) if len(
__BrianingDerivePopulater.MoniteringStateTuplesList
)>0 else [],
self.NetworkedDeriveConnectersList
)
)
)
)
#debug
'''
self.debug(('self.',self,['BrianedStepTimeFloatsList']))
'''
#import
import brian
#Check
		if self.BrianingTimeDimensionVariable is None:
self.BrianingTimeDimensionVariable=brian.ms
#init
self.BrianedNetworkVariable=brian.MagicNetwork()
#set the clocks
self.BrianedSimulationClock=brian.Clock(
dt=self.SimulatingStepTimeFloat*self.BrianingTimeDimensionVariable
)
self.BrianedClocksDict=dict(
map(
lambda __BrianedStepTimeFloat:
(
str(__BrianedStepTimeFloat),
brian.Clock(
dt=__BrianedStepTimeFloat*self.BrianingTimeDimensionVariable
)
),
self.BrianedStepTimeFloatsList
)
,**{
str(
self.SimulatingStepTimeFloat
):self.BrianedSimulationClock
}
)
#debug
self.debug(('self.',self,['BrianedClocksDict']))
#set clock to the neuron groups
self.BrianedNeuronGroupsList=map(
lambda __BrianingDerivePopulater:
__BrianingDerivePopulater.__setitem__(
'NeuronGroup',
brian.NeuronGroup(
__BrianingDerivePopulater.PopulatingUnitsInt,
__BrianingDerivePopulater.PopulatingEquationStr,
clock=self.BrianedClocksDict[str(self.SimulatingStepTimeFloat)]
)
).NeuronGroup,
self.NetworkedDeriveConnectersList
)
#set the clocks and state monitors
self.BrianedStateMonitorsList=SYS.flat(
map(
lambda __BrianingDerivePopulater:
map(
lambda __MoniteringStateTuple:
__BrianingDerivePopulater.__setitem__(
str(__MoniteringStateTuple)+'StateMonitor',
getattr(
brian,
'StateMonitor'
)(
__BrianingDerivePopulater.NeuronGroup,
__MoniteringStateTuple[0],
record=__MoniteringStateTuple[1],
clock=self.BrianedClocksDict[str(__MoniteringStateTuple[2])]
)
).SettingValueVariable,
__BrianingDerivePopulater.MoniteringStateTuplesList
),
self.NetworkedDeriveConnectersList
)
)
#set the spike monitors
self.BrianedSpikeMonitorsList=SYS.flat(
map(
lambda __BrianingDerivePopulater:
map(
lambda __MoniteringSpikeTuple:
__BrianingDerivePopulater.__setitem__(
str(__MoniteringSpikeTuple)+'SpikeMonitor',
getattr(
brian,
'SpikeMonitor'
)(
__BrianingDerivePopulater.NeuronGroup,
)
).SettingValueVariable,
__BrianingDerivePopulater.MoniteringSpikeTuplesList
),
self.NetworkedDeriveConnectersList
)
)
#debug
'''
self.debug(('self.',self,['NetworkedConnectionTuplesList']))
'''
'''
#set connections
self.BrianedConnectionsList=map(
lambda __ConnectionTuple:
map(
lambda __ListedVariable:
__ConnectionTuple[0].__setitem__(
str(
(
__ConnectionTuple[0].NodeKeyStr,
__ListedVariable.NodeKeyStr
)
)+'Connection',
brian.Connection(
__ConnectionTuple[0].NeuronGroup,
__ListedVariable.NeuronGroup
)
).SettingValueVariable,
__ConnectionTuple[1][0]
)+map(
lambda __ListedVariable:
__ListedVariable.__setitem__(
str(
(
__ListedVariable.NodeKeyStr,
__ConnectionTuple[0].NodeKeyStr
)
)+'Connection',
brian.Connection(
__ListedVariable.NeuronGroup,
__ConnectionTuple[0].NeuronGroup
)
).SettingValueVariable,
__ConnectionTuple[1][1]
),
self.NetworkedConnectionTuplesList
)
'''
"""
#debug
'''
self.debug(('self.',self,['BrianedNeuronGroupsList']))
'''
#alias
BrianedNetworkVariable=self.BrianedNetworkVariable
#add
map(
lambda __BrianedVariable:
BrianedNetworkVariable.add(__BrianedVariable),
			self.BrianedNeuronGroupsList+(self.BrianedConnectionsList or [])+self.BrianedStateMonitorsList+self.BrianedSpikeMonitorsList
)
#Check
if self.BrianingPrintRunIsBool:
#debug
self.debug(('self.',self,[
'BrianedSimulationClock'
]))
#define
@brian.network_operation(
self.BrianedSimulationClock
)
def printControl():
#Print Time
print(
"time is "+str(
self.BrianedSimulationClock.t*self.BrianingTimeDimensionVariable
)
)
'''
#Print NeuronGroup
print(
"variables are"+str(
self.BrianedNeuronGroupsList[0]
)
)
'''
		self.BrianedNetworkVariable.add(printControl)
"""
#</DefineClass>
|
Ledoux/ShareYourSystem
|
Pythonlogy/draft/Simulaters/Brianer/draft/__init__ copy.py
|
Python
|
mit
| 7,536
|
[
"Brian",
"NEURON"
] |
e90b4457231e4845d73718ed545b80b1b5b0fe6bbe799591fc3934acb2983fcf
|
import unittest
import numpy as np
from spglib import get_symmetry
from vasp import read_vasp_from_strings
Al222 = """Al
1.0
8.0786564361761641 0.0000000000000000 0.0000000000000000
0.0000000000000000 8.0786564361761641 0.0000000000000000
0.0000000000000000 0.0000000000000000 8.0786564361761641
32
Direct
0.0000000000000000 0.0000000000000000 0.0000000000000000
0.5000000000000000 0.0000000000000000 0.0000000000000000
0.0000000000000000 0.5000000000000000 0.0000000000000000
0.5000000000000000 0.5000000000000000 0.0000000000000000
0.0000000000000000 0.0000000000000000 0.5000000000000000
0.5000000000000000 0.0000000000000000 0.5000000000000000
0.0000000000000000 0.5000000000000000 0.5000000000000000
0.5000000000000000 0.5000000000000000 0.5000000000000000
0.0000000000000000 0.2500000000000000 0.2500000000000000
0.5000000000000000 0.2500000000000000 0.2500000000000000
0.0000000000000000 0.7500000000000000 0.2500000000000000
0.5000000000000000 0.7500000000000000 0.2500000000000000
0.0000000000000000 0.2500000000000000 0.7500000000000000
0.5000000000000000 0.2500000000000000 0.7500000000000000
0.0000000000000000 0.7500000000000000 0.7500000000000000
0.5000000000000000 0.7500000000000000 0.7500000000000000
0.2500000000000000 0.0000000000000000 0.2500000000000000
0.7500000000000000 0.0000000000000000 0.2500000000000000
0.2500000000000000 0.5000000000000000 0.2500000000000000
0.7500000000000000 0.5000000000000000 0.2500000000000000
0.2500000000000000 0.0000000000000000 0.7500000000000000
0.7500000000000000 0.0000000000000000 0.7500000000000000
0.2500000000000000 0.5000000000000000 0.7500000000000000
0.7500000000000000 0.5000000000000000 0.7500000000000000
0.2500000000000000 0.2500000000000000 0.0000000000000000
0.7500000000000000 0.2500000000000000 0.0000000000000000
0.2500000000000000 0.7500000000000000 0.0000000000000000
0.7500000000000000 0.7500000000000000 0.0000000000000000
0.2500000000000000 0.2500000000000000 0.5000000000000000
0.7500000000000000 0.2500000000000000 0.5000000000000000
0.2500000000000000 0.7500000000000000 0.5000000000000000
0.7500000000000000 0.7500000000000000 0.5000000000000000"""
sym_ops_str = """ 1 0 0 0 1 0 0 0 1 0.0000000 0.0000000 0.0000000
-1 0 0 0 -1 0 0 0 -1 0.0000000 0.0000000 0.0000000
0 0 -1 0 1 0 1 0 0 0.0000000 0.0000000 0.0000000
0 0 1 0 -1 0 -1 0 0 0.0000000 0.0000000 0.0000000
-1 0 0 0 1 0 0 0 -1 0.0000000 0.0000000 0.0000000
1 0 0 0 -1 0 0 0 1 0.0000000 0.0000000 0.0000000
0 0 1 0 1 0 -1 0 0 0.0000000 0.0000000 0.0000000
0 0 -1 0 -1 0 1 0 0 0.0000000 0.0000000 0.0000000
1 0 0 0 -1 0 0 0 -1 0.0000000 0.0000000 0.0000000
-1 0 0 0 1 0 0 0 1 0.0000000 0.0000000 0.0000000
0 0 -1 0 -1 0 -1 0 0 0.0000000 0.0000000 0.0000000
0 0 1 0 1 0 1 0 0 0.0000000 0.0000000 0.0000000
-1 0 0 0 -1 0 0 0 1 0.0000000 0.0000000 0.0000000
1 0 0 0 1 0 0 0 -1 0.0000000 0.0000000 0.0000000
0 0 1 0 -1 0 1 0 0 0.0000000 0.0000000 0.0000000
0 0 -1 0 1 0 -1 0 0 0.0000000 0.0000000 0.0000000
0 1 0 0 0 1 1 0 0 0.0000000 0.0000000 0.0000000
0 -1 0 0 0 -1 -1 0 0 0.0000000 0.0000000 0.0000000
0 1 0 1 0 0 0 0 -1 0.0000000 0.0000000 0.0000000
0 -1 0 -1 0 0 0 0 1 0.0000000 0.0000000 0.0000000
0 1 0 0 0 -1 -1 0 0 0.0000000 0.0000000 0.0000000
0 -1 0 0 0 1 1 0 0 0.0000000 0.0000000 0.0000000
0 1 0 -1 0 0 0 0 1 0.0000000 0.0000000 0.0000000
0 -1 0 1 0 0 0 0 -1 0.0000000 0.0000000 0.0000000
0 -1 0 0 0 -1 1 0 0 0.0000000 0.0000000 0.0000000
0 1 0 0 0 1 -1 0 0 0.0000000 0.0000000 0.0000000
0 -1 0 -1 0 0 0 0 -1 0.0000000 0.0000000 0.0000000
0 1 0 1 0 0 0 0 1 0.0000000 0.0000000 0.0000000
0 -1 0 0 0 1 -1 0 0 0.0000000 0.0000000 0.0000000
0 1 0 0 0 -1 1 0 0 0.0000000 0.0000000 0.0000000
0 -1 0 1 0 0 0 0 1 0.0000000 0.0000000 0.0000000
0 1 0 -1 0 0 0 0 -1 0.0000000 0.0000000 0.0000000
0 0 1 1 0 0 0 1 0 0.0000000 0.0000000 0.0000000
0 0 -1 -1 0 0 0 -1 0 0.0000000 0.0000000 0.0000000
1 0 0 0 0 -1 0 1 0 0.0000000 0.0000000 0.0000000
-1 0 0 0 0 1 0 -1 0 0.0000000 0.0000000 0.0000000
0 0 -1 -1 0 0 0 1 0 0.0000000 0.0000000 0.0000000
0 0 1 1 0 0 0 -1 0 0.0000000 0.0000000 0.0000000
-1 0 0 0 0 1 0 1 0 0.0000000 0.0000000 0.0000000
1 0 0 0 0 -1 0 -1 0 0.0000000 0.0000000 0.0000000
0 0 -1 1 0 0 0 -1 0 0.0000000 0.0000000 0.0000000
0 0 1 -1 0 0 0 1 0 0.0000000 0.0000000 0.0000000
-1 0 0 0 0 -1 0 -1 0 0.0000000 0.0000000 0.0000000
1 0 0 0 0 1 0 1 0 0.0000000 0.0000000 0.0000000
0 0 1 -1 0 0 0 -1 0 0.0000000 0.0000000 0.0000000
0 0 -1 1 0 0 0 1 0 0.0000000 0.0000000 0.0000000
1 0 0 0 0 1 0 -1 0 0.0000000 0.0000000 0.0000000
-1 0 0 0 0 -1 0 1 0 0.0000000 0.0000000 0.0000000
1 0 0 0 1 0 0 0 1 0.7500000 0.7500000 0.0000000
-1 0 0 0 -1 0 0 0 -1 0.7500000 0.7500000 0.0000000
0 0 -1 0 1 0 1 0 0 0.7500000 0.7500000 0.0000000
0 0 1 0 -1 0 -1 0 0 0.7500000 0.7500000 0.0000000
-1 0 0 0 1 0 0 0 -1 0.7500000 0.7500000 0.0000000
1 0 0 0 -1 0 0 0 1 0.7500000 0.7500000 0.0000000
0 0 1 0 1 0 -1 0 0 0.7500000 0.7500000 0.0000000
0 0 -1 0 -1 0 1 0 0 0.7500000 0.7500000 0.0000000
1 0 0 0 -1 0 0 0 -1 0.7500000 0.7500000 0.0000000
-1 0 0 0 1 0 0 0 1 0.7500000 0.7500000 0.0000000
0 0 -1 0 -1 0 -1 0 0 0.7500000 0.7500000 0.0000000
0 0 1 0 1 0 1 0 0 0.7500000 0.7500000 0.0000000
-1 0 0 0 -1 0 0 0 1 0.7500000 0.7500000 0.0000000
1 0 0 0 1 0 0 0 -1 0.7500000 0.7500000 0.0000000
0 0 1 0 -1 0 1 0 0 0.7500000 0.7500000 0.0000000
0 0 -1 0 1 0 -1 0 0 0.7500000 0.7500000 0.0000000
0 1 0 0 0 1 1 0 0 0.7500000 0.7500000 0.0000000
0 -1 0 0 0 -1 -1 0 0 0.7500000 0.7500000 0.0000000
0 1 0 1 0 0 0 0 -1 0.7500000 0.7500000 0.0000000
0 -1 0 -1 0 0 0 0 1 0.7500000 0.7500000 0.0000000
0 1 0 0 0 -1 -1 0 0 0.7500000 0.7500000 0.0000000
0 -1 0 0 0 1 1 0 0 0.7500000 0.7500000 0.0000000
0 1 0 -1 0 0 0 0 1 0.7500000 0.7500000 0.0000000
0 -1 0 1 0 0 0 0 -1 0.7500000 0.7500000 0.0000000
0 -1 0 0 0 -1 1 0 0 0.7500000 0.7500000 0.0000000
0 1 0 0 0 1 -1 0 0 0.7500000 0.7500000 0.0000000
0 -1 0 -1 0 0 0 0 -1 0.7500000 0.7500000 0.0000000
0 1 0 1 0 0 0 0 1 0.7500000 0.7500000 0.0000000
0 -1 0 0 0 1 -1 0 0 0.7500000 0.7500000 0.0000000
0 1 0 0 0 -1 1 0 0 0.7500000 0.7500000 0.0000000
0 -1 0 1 0 0 0 0 1 0.7500000 0.7500000 0.0000000
0 1 0 -1 0 0 0 0 -1 0.7500000 0.7500000 0.0000000
0 0 1 1 0 0 0 1 0 0.7500000 0.7500000 0.0000000
0 0 -1 -1 0 0 0 -1 0 0.7500000 0.7500000 0.0000000
1 0 0 0 0 -1 0 1 0 0.7500000 0.7500000 0.0000000
-1 0 0 0 0 1 0 -1 0 0.7500000 0.7500000 0.0000000
0 0 -1 -1 0 0 0 1 0 0.7500000 0.7500000 0.0000000
0 0 1 1 0 0 0 -1 0 0.7500000 0.7500000 0.0000000
-1 0 0 0 0 1 0 1 0 0.7500000 0.7500000 0.0000000
1 0 0 0 0 -1 0 -1 0 0.7500000 0.7500000 0.0000000
0 0 -1 1 0 0 0 -1 0 0.7500000 0.7500000 0.0000000
0 0 1 -1 0 0 0 1 0 0.7500000 0.7500000 0.0000000
-1 0 0 0 0 -1 0 -1 0 0.7500000 0.7500000 0.0000000
1 0 0 0 0 1 0 1 0 0.7500000 0.7500000 0.0000000
0 0 1 -1 0 0 0 -1 0 0.7500000 0.7500000 0.0000000
0 0 -1 1 0 0 0 1 0 0.7500000 0.7500000 0.0000000
1 0 0 0 0 1 0 -1 0 0.7500000 0.7500000 0.0000000
-1 0 0 0 0 -1 0 1 0 0.7500000 0.7500000 0.0000000
1 0 0 0 1 0 0 0 1 0.5000000 0.5000000 0.0000000
-1 0 0 0 -1 0 0 0 -1 0.5000000 0.5000000 0.0000000
0 0 -1 0 1 0 1 0 0 0.5000000 0.5000000 0.0000000
0 0 1 0 -1 0 -1 0 0 0.5000000 0.5000000 0.0000000
-1 0 0 0 1 0 0 0 -1 0.5000000 0.5000000 0.0000000
1 0 0 0 -1 0 0 0 1 0.5000000 0.5000000 0.0000000
0 0 1 0 1 0 -1 0 0 0.5000000 0.5000000 0.0000000
0 0 -1 0 -1 0 1 0 0 0.5000000 0.5000000 0.0000000
1 0 0 0 -1 0 0 0 -1 0.5000000 0.5000000 0.0000000
-1 0 0 0 1 0 0 0 1 0.5000000 0.5000000 0.0000000
0 0 -1 0 -1 0 -1 0 0 0.5000000 0.5000000 0.0000000
0 0 1 0 1 0 1 0 0 0.5000000 0.5000000 0.0000000
-1 0 0 0 -1 0 0 0 1 0.5000000 0.5000000 0.0000000
1 0 0 0 1 0 0 0 -1 0.5000000 0.5000000 0.0000000
0 0 1 0 -1 0 1 0 0 0.5000000 0.5000000 0.0000000
0 0 -1 0 1 0 -1 0 0 0.5000000 0.5000000 0.0000000
0 1 0 0 0 1 1 0 0 0.5000000 0.5000000 0.0000000
0 -1 0 0 0 -1 -1 0 0 0.5000000 0.5000000 0.0000000
0 1 0 1 0 0 0 0 -1 0.5000000 0.5000000 0.0000000
0 -1 0 -1 0 0 0 0 1 0.5000000 0.5000000 0.0000000
0 1 0 0 0 -1 -1 0 0 0.5000000 0.5000000 0.0000000
0 -1 0 0 0 1 1 0 0 0.5000000 0.5000000 0.0000000
0 1 0 -1 0 0 0 0 1 0.5000000 0.5000000 0.0000000
0 -1 0 1 0 0 0 0 -1 0.5000000 0.5000000 0.0000000
0 -1 0 0 0 -1 1 0 0 0.5000000 0.5000000 0.0000000
0 1 0 0 0 1 -1 0 0 0.5000000 0.5000000 0.0000000
0 -1 0 -1 0 0 0 0 -1 0.5000000 0.5000000 0.0000000
0 1 0 1 0 0 0 0 1 0.5000000 0.5000000 0.0000000
0 -1 0 0 0 1 -1 0 0 0.5000000 0.5000000 0.0000000
0 1 0 0 0 -1 1 0 0 0.5000000 0.5000000 0.0000000
0 -1 0 1 0 0 0 0 1 0.5000000 0.5000000 0.0000000
0 1 0 -1 0 0 0 0 -1 0.5000000 0.5000000 0.0000000
0 0 1 1 0 0 0 1 0 0.5000000 0.5000000 0.0000000
0 0 -1 -1 0 0 0 -1 0 0.5000000 0.5000000 0.0000000
1 0 0 0 0 -1 0 1 0 0.5000000 0.5000000 0.0000000
-1 0 0 0 0 1 0 -1 0 0.5000000 0.5000000 0.0000000
0 0 -1 -1 0 0 0 1 0 0.5000000 0.5000000 0.0000000
0 0 1 1 0 0 0 -1 0 0.5000000 0.5000000 0.0000000
-1 0 0 0 0 1 0 1 0 0.5000000 0.5000000 0.0000000
1 0 0 0 0 -1 0 -1 0 0.5000000 0.5000000 0.0000000
0 0 -1 1 0 0 0 -1 0 0.5000000 0.5000000 0.0000000
0 0 1 -1 0 0 0 1 0 0.5000000 0.5000000 0.0000000
-1 0 0 0 0 -1 0 -1 0 0.5000000 0.5000000 0.0000000
1 0 0 0 0 1 0 1 0 0.5000000 0.5000000 0.0000000
0 0 1 -1 0 0 0 -1 0 0.5000000 0.5000000 0.0000000
0 0 -1 1 0 0 0 1 0 0.5000000 0.5000000 0.0000000
1 0 0 0 0 1 0 -1 0 0.5000000 0.5000000 0.0000000
-1 0 0 0 0 -1 0 1 0 0.5000000 0.5000000 0.0000000
1 0 0 0 1 0 0 0 1 0.2500000 0.2500000 0.0000000
-1 0 0 0 -1 0 0 0 -1 0.2500000 0.2500000 0.0000000
0 0 -1 0 1 0 1 0 0 0.2500000 0.2500000 0.0000000
0 0 1 0 -1 0 -1 0 0 0.2500000 0.2500000 0.0000000
-1 0 0 0 1 0 0 0 -1 0.2500000 0.2500000 0.0000000
1 0 0 0 -1 0 0 0 1 0.2500000 0.2500000 0.0000000
0 0 1 0 1 0 -1 0 0 0.2500000 0.2500000 0.0000000
0 0 -1 0 -1 0 1 0 0 0.2500000 0.2500000 0.0000000
1 0 0 0 -1 0 0 0 -1 0.2500000 0.2500000 0.0000000
-1 0 0 0 1 0 0 0 1 0.2500000 0.2500000 0.0000000
0 0 -1 0 -1 0 -1 0 0 0.2500000 0.2500000 0.0000000
0 0 1 0 1 0 1 0 0 0.2500000 0.2500000 0.0000000
-1 0 0 0 -1 0 0 0 1 0.2500000 0.2500000 0.0000000
1 0 0 0 1 0 0 0 -1 0.2500000 0.2500000 0.0000000
0 0 1 0 -1 0 1 0 0 0.2500000 0.2500000 0.0000000
0 0 -1 0 1 0 -1 0 0 0.2500000 0.2500000 0.0000000
0 1 0 0 0 1 1 0 0 0.2500000 0.2500000 0.0000000
0 -1 0 0 0 -1 -1 0 0 0.2500000 0.2500000 0.0000000
0 1 0 1 0 0 0 0 -1 0.2500000 0.2500000 0.0000000
0 -1 0 -1 0 0 0 0 1 0.2500000 0.2500000 0.0000000
0 1 0 0 0 -1 -1 0 0 0.2500000 0.2500000 0.0000000
0 -1 0 0 0 1 1 0 0 0.2500000 0.2500000 0.0000000
0 1 0 -1 0 0 0 0 1 0.2500000 0.2500000 0.0000000
0 -1 0 1 0 0 0 0 -1 0.2500000 0.2500000 0.0000000
0 -1 0 0 0 -1 1 0 0 0.2500000 0.2500000 0.0000000
0 1 0 0 0 1 -1 0 0 0.2500000 0.2500000 0.0000000
0 -1 0 -1 0 0 0 0 -1 0.2500000 0.2500000 0.0000000
0 1 0 1 0 0 0 0 1 0.2500000 0.2500000 0.0000000
0 -1 0 0 0 1 -1 0 0 0.2500000 0.2500000 0.0000000
0 1 0 0 0 -1 1 0 0 0.2500000 0.2500000 0.0000000
0 -1 0 1 0 0 0 0 1 0.2500000 0.2500000 0.0000000
0 1 0 -1 0 0 0 0 -1 0.2500000 0.2500000 0.0000000
0 0 1 1 0 0 0 1 0 0.2500000 0.2500000 0.0000000
0 0 -1 -1 0 0 0 -1 0 0.2500000 0.2500000 0.0000000
1 0 0 0 0 -1 0 1 0 0.2500000 0.2500000 0.0000000
-1 0 0 0 0 1 0 -1 0 0.2500000 0.2500000 0.0000000
0 0 -1 -1 0 0 0 1 0 0.2500000 0.2500000 0.0000000
0 0 1 1 0 0 0 -1 0 0.2500000 0.2500000 0.0000000
-1 0 0 0 0 1 0 1 0 0.2500000 0.2500000 0.0000000
1 0 0 0 0 -1 0 -1 0 0.2500000 0.2500000 0.0000000
0 0 -1 1 0 0 0 -1 0 0.2500000 0.2500000 0.0000000
0 0 1 -1 0 0 0 1 0 0.2500000 0.2500000 0.0000000
-1 0 0 0 0 -1 0 -1 0 0.2500000 0.2500000 0.0000000
1 0 0 0 0 1 0 1 0 0.2500000 0.2500000 0.0000000
0 0 1 -1 0 0 0 -1 0 0.2500000 0.2500000 0.0000000
0 0 -1 1 0 0 0 1 0 0.2500000 0.2500000 0.0000000
1 0 0 0 0 1 0 -1 0 0.2500000 0.2500000 0.0000000
-1 0 0 0 0 -1 0 1 0 0.2500000 0.2500000 0.0000000
1 0 0 0 1 0 0 0 1 0.2500000 0.0000000 0.7500000
-1 0 0 0 -1 0 0 0 -1 0.2500000 0.0000000 0.7500000
0 0 -1 0 1 0 1 0 0 0.2500000 0.0000000 0.7500000
0 0 1 0 -1 0 -1 0 0 0.2500000 0.0000000 0.7500000
-1 0 0 0 1 0 0 0 -1 0.2500000 0.0000000 0.7500000
1 0 0 0 -1 0 0 0 1 0.2500000 0.0000000 0.7500000
0 0 1 0 1 0 -1 0 0 0.2500000 0.0000000 0.7500000
0 0 -1 0 -1 0 1 0 0 0.2500000 0.0000000 0.7500000
1 0 0 0 -1 0 0 0 -1 0.2500000 0.0000000 0.7500000
-1 0 0 0 1 0 0 0 1 0.2500000 0.0000000 0.7500000
0 0 -1 0 -1 0 -1 0 0 0.2500000 0.0000000 0.7500000
0 0 1 0 1 0 1 0 0 0.2500000 0.0000000 0.7500000
-1 0 0 0 -1 0 0 0 1 0.2500000 0.0000000 0.7500000
1 0 0 0 1 0 0 0 -1 0.2500000 0.0000000 0.7500000
0 0 1 0 -1 0 1 0 0 0.2500000 0.0000000 0.7500000
0 0 -1 0 1 0 -1 0 0 0.2500000 0.0000000 0.7500000
0 1 0 0 0 1 1 0 0 0.2500000 0.0000000 0.7500000
0 -1 0 0 0 -1 -1 0 0 0.2500000 0.0000000 0.7500000
0 1 0 1 0 0 0 0 -1 0.2500000 0.0000000 0.7500000
0 -1 0 -1 0 0 0 0 1 0.2500000 0.0000000 0.7500000
0 1 0 0 0 -1 -1 0 0 0.2500000 0.0000000 0.7500000
0 -1 0 0 0 1 1 0 0 0.2500000 0.0000000 0.7500000
0 1 0 -1 0 0 0 0 1 0.2500000 0.0000000 0.7500000
0 -1 0 1 0 0 0 0 -1 0.2500000 0.0000000 0.7500000
0 -1 0 0 0 -1 1 0 0 0.2500000 0.0000000 0.7500000
0 1 0 0 0 1 -1 0 0 0.2500000 0.0000000 0.7500000
0 -1 0 -1 0 0 0 0 -1 0.2500000 0.0000000 0.7500000
0 1 0 1 0 0 0 0 1 0.2500000 0.0000000 0.7500000
0 -1 0 0 0 1 -1 0 0 0.2500000 0.0000000 0.7500000
0 1 0 0 0 -1 1 0 0 0.2500000 0.0000000 0.7500000
0 -1 0 1 0 0 0 0 1 0.2500000 0.0000000 0.7500000
0 1 0 -1 0 0 0 0 -1 0.2500000 0.0000000 0.7500000
0 0 1 1 0 0 0 1 0 0.2500000 0.0000000 0.7500000
0 0 -1 -1 0 0 0 -1 0 0.2500000 0.0000000 0.7500000
1 0 0 0 0 -1 0 1 0 0.2500000 0.0000000 0.7500000
-1 0 0 0 0 1 0 -1 0 0.2500000 0.0000000 0.7500000
0 0 -1 -1 0 0 0 1 0 0.2500000 0.0000000 0.7500000
0 0 1 1 0 0 0 -1 0 0.2500000 0.0000000 0.7500000
-1 0 0 0 0 1 0 1 0 0.2500000 0.0000000 0.7500000
1 0 0 0 0 -1 0 -1 0 0.2500000 0.0000000 0.7500000
0 0 -1 1 0 0 0 -1 0 0.2500000 0.0000000 0.7500000
0 0 1 -1 0 0 0 1 0 0.2500000 0.0000000 0.7500000
-1 0 0 0 0 -1 0 -1 0 0.2500000 0.0000000 0.7500000
1 0 0 0 0 1 0 1 0 0.2500000 0.0000000 0.7500000
0 0 1 -1 0 0 0 -1 0 0.2500000 0.0000000 0.7500000
0 0 -1 1 0 0 0 1 0 0.2500000 0.0000000 0.7500000
1 0 0 0 0 1 0 -1 0 0.2500000 0.0000000 0.7500000
-1 0 0 0 0 -1 0 1 0 0.2500000 0.0000000 0.7500000
1 0 0 0 1 0 0 0 1 0.0000000 0.7500000 0.7500000
-1 0 0 0 -1 0 0 0 -1 0.0000000 0.7500000 0.7500000
0 0 -1 0 1 0 1 0 0 0.0000000 0.7500000 0.7500000
0 0 1 0 -1 0 -1 0 0 0.0000000 0.7500000 0.7500000
-1 0 0 0 1 0 0 0 -1 0.0000000 0.7500000 0.7500000
1 0 0 0 -1 0 0 0 1 0.0000000 0.7500000 0.7500000
0 0 1 0 1 0 -1 0 0 0.0000000 0.7500000 0.7500000
0 0 -1 0 -1 0 1 0 0 0.0000000 0.7500000 0.7500000
1 0 0 0 -1 0 0 0 -1 0.0000000 0.7500000 0.7500000
-1 0 0 0 1 0 0 0 1 0.0000000 0.7500000 0.7500000
0 0 -1 0 -1 0 -1 0 0 0.0000000 0.7500000 0.7500000
0 0 1 0 1 0 1 0 0 0.0000000 0.7500000 0.7500000
-1 0 0 0 -1 0 0 0 1 0.0000000 0.7500000 0.7500000
1 0 0 0 1 0 0 0 -1 0.0000000 0.7500000 0.7500000
0 0 1 0 -1 0 1 0 0 0.0000000 0.7500000 0.7500000
0 0 -1 0 1 0 -1 0 0 0.0000000 0.7500000 0.7500000
0 1 0 0 0 1 1 0 0 0.0000000 0.7500000 0.7500000
0 -1 0 0 0 -1 -1 0 0 0.0000000 0.7500000 0.7500000
0 1 0 1 0 0 0 0 -1 0.0000000 0.7500000 0.7500000
0 -1 0 -1 0 0 0 0 1 0.0000000 0.7500000 0.7500000
0 1 0 0 0 -1 -1 0 0 0.0000000 0.7500000 0.7500000
0 -1 0 0 0 1 1 0 0 0.0000000 0.7500000 0.7500000
0 1 0 -1 0 0 0 0 1 0.0000000 0.7500000 0.7500000
0 -1 0 1 0 0 0 0 -1 0.0000000 0.7500000 0.7500000
0 -1 0 0 0 -1 1 0 0 0.0000000 0.7500000 0.7500000
0 1 0 0 0 1 -1 0 0 0.0000000 0.7500000 0.7500000
0 -1 0 -1 0 0 0 0 -1 0.0000000 0.7500000 0.7500000
0 1 0 1 0 0 0 0 1 0.0000000 0.7500000 0.7500000
0 -1 0 0 0 1 -1 0 0 0.0000000 0.7500000 0.7500000
0 1 0 0 0 -1 1 0 0 0.0000000 0.7500000 0.7500000
0 -1 0 1 0 0 0 0 1 0.0000000 0.7500000 0.7500000
0 1 0 -1 0 0 0 0 -1 0.0000000 0.7500000 0.7500000
0 0 1 1 0 0 0 1 0 0.0000000 0.7500000 0.7500000
0 0 -1 -1 0 0 0 -1 0 0.0000000 0.7500000 0.7500000
1 0 0 0 0 -1 0 1 0 0.0000000 0.7500000 0.7500000
-1 0 0 0 0 1 0 -1 0 0.0000000 0.7500000 0.7500000
0 0 -1 -1 0 0 0 1 0 0.0000000 0.7500000 0.7500000
0 0 1 1 0 0 0 -1 0 0.0000000 0.7500000 0.7500000
-1 0 0 0 0 1 0 1 0 0.0000000 0.7500000 0.7500000
1 0 0 0 0 -1 0 -1 0 0.0000000 0.7500000 0.7500000
0 0 -1 1 0 0 0 -1 0 0.0000000 0.7500000 0.7500000
0 0 1 -1 0 0 0 1 0 0.0000000 0.7500000 0.7500000
-1 0 0 0 0 -1 0 -1 0 0.0000000 0.7500000 0.7500000
1 0 0 0 0 1 0 1 0 0.0000000 0.7500000 0.7500000
0 0 1 -1 0 0 0 -1 0 0.0000000 0.7500000 0.7500000
0 0 -1 1 0 0 0 1 0 0.0000000 0.7500000 0.7500000
1 0 0 0 0 1 0 -1 0 0.0000000 0.7500000 0.7500000
-1 0 0 0 0 -1 0 1 0 0.0000000 0.7500000 0.7500000
1 0 0 0 1 0 0 0 1 0.7500000 0.5000000 0.7500000
-1 0 0 0 -1 0 0 0 -1 0.7500000 0.5000000 0.7500000
0 0 -1 0 1 0 1 0 0 0.7500000 0.5000000 0.7500000
0 0 1 0 -1 0 -1 0 0 0.7500000 0.5000000 0.7500000
-1 0 0 0 1 0 0 0 -1 0.7500000 0.5000000 0.7500000
1 0 0 0 -1 0 0 0 1 0.7500000 0.5000000 0.7500000
0 0 1 0 1 0 -1 0 0 0.7500000 0.5000000 0.7500000
0 0 -1 0 -1 0 1 0 0 0.7500000 0.5000000 0.7500000
1 0 0 0 -1 0 0 0 -1 0.7500000 0.5000000 0.7500000
-1 0 0 0 1 0 0 0 1 0.7500000 0.5000000 0.7500000
0 0 -1 0 -1 0 -1 0 0 0.7500000 0.5000000 0.7500000
0 0 1 0 1 0 1 0 0 0.7500000 0.5000000 0.7500000
-1 0 0 0 -1 0 0 0 1 0.7500000 0.5000000 0.7500000
1 0 0 0 1 0 0 0 -1 0.7500000 0.5000000 0.7500000
0 0 1 0 -1 0 1 0 0 0.7500000 0.5000000 0.7500000
0 0 -1 0 1 0 -1 0 0 0.7500000 0.5000000 0.7500000
0 1 0 0 0 1 1 0 0 0.7500000 0.5000000 0.7500000
0 -1 0 0 0 -1 -1 0 0 0.7500000 0.5000000 0.7500000
0 1 0 1 0 0 0 0 -1 0.7500000 0.5000000 0.7500000
0 -1 0 -1 0 0 0 0 1 0.7500000 0.5000000 0.7500000
0 1 0 0 0 -1 -1 0 0 0.7500000 0.5000000 0.7500000
0 -1 0 0 0 1 1 0 0 0.7500000 0.5000000 0.7500000
0 1 0 -1 0 0 0 0 1 0.7500000 0.5000000 0.7500000
0 -1 0 1 0 0 0 0 -1 0.7500000 0.5000000 0.7500000
0 -1 0 0 0 -1 1 0 0 0.7500000 0.5000000 0.7500000
0 1 0 0 0 1 -1 0 0 0.7500000 0.5000000 0.7500000
0 -1 0 -1 0 0 0 0 -1 0.7500000 0.5000000 0.7500000
0 1 0 1 0 0 0 0 1 0.7500000 0.5000000 0.7500000
0 -1 0 0 0 1 -1 0 0 0.7500000 0.5000000 0.7500000
0 1 0 0 0 -1 1 0 0 0.7500000 0.5000000 0.7500000
0 -1 0 1 0 0 0 0 1 0.7500000 0.5000000 0.7500000
0 1 0 -1 0 0 0 0 -1 0.7500000 0.5000000 0.7500000
0 0 1 1 0 0 0 1 0 0.7500000 0.5000000 0.7500000
0 0 -1 -1 0 0 0 -1 0 0.7500000 0.5000000 0.7500000
1 0 0 0 0 -1 0 1 0 0.7500000 0.5000000 0.7500000
-1 0 0 0 0 1 0 -1 0 0.7500000 0.5000000 0.7500000
0 0 -1 -1 0 0 0 1 0 0.7500000 0.5000000 0.7500000
0 0 1 1 0 0 0 -1 0 0.7500000 0.5000000 0.7500000
-1 0 0 0 0 1 0 1 0 0.7500000 0.5000000 0.7500000
1 0 0 0 0 -1 0 -1 0 0.7500000 0.5000000 0.7500000
0 0 -1 1 0 0 0 -1 0 0.7500000 0.5000000 0.7500000
0 0 1 -1 0 0 0 1 0 0.7500000 0.5000000 0.7500000
-1 0 0 0 0 -1 0 -1 0 0.7500000 0.5000000 0.7500000
1 0 0 0 0 1 0 1 0 0.7500000 0.5000000 0.7500000
0 0 1 -1 0 0 0 -1 0 0.7500000 0.5000000 0.7500000
0 0 -1 1 0 0 0 1 0 0.7500000 0.5000000 0.7500000
1 0 0 0 0 1 0 -1 0 0.7500000 0.5000000 0.7500000
-1 0 0 0 0 -1 0 1 0 0.7500000 0.5000000 0.7500000
1 0 0 0 1 0 0 0 1 0.5000000 0.2500000 0.7500000
-1 0 0 0 -1 0 0 0 -1 0.5000000 0.2500000 0.7500000
0 0 -1 0 1 0 1 0 0 0.5000000 0.2500000 0.7500000
0 0 1 0 -1 0 -1 0 0 0.5000000 0.2500000 0.7500000
-1 0 0 0 1 0 0 0 -1 0.5000000 0.2500000 0.7500000
1 0 0 0 -1 0 0 0 1 0.5000000 0.2500000 0.7500000
0 0 1 0 1 0 -1 0 0 0.5000000 0.2500000 0.7500000
0 0 -1 0 -1 0 1 0 0 0.5000000 0.2500000 0.7500000
1 0 0 0 -1 0 0 0 -1 0.5000000 0.2500000 0.7500000
-1 0 0 0 1 0 0 0 1 0.5000000 0.2500000 0.7500000
0 0 -1 0 -1 0 -1 0 0 0.5000000 0.2500000 0.7500000
0 0 1 0 1 0 1 0 0 0.5000000 0.2500000 0.7500000
-1 0 0 0 -1 0 0 0 1 0.5000000 0.2500000 0.7500000
1 0 0 0 1 0 0 0 -1 0.5000000 0.2500000 0.7500000
0 0 1 0 -1 0 1 0 0 0.5000000 0.2500000 0.7500000
0 0 -1 0 1 0 -1 0 0 0.5000000 0.2500000 0.7500000
0 1 0 0 0 1 1 0 0 0.5000000 0.2500000 0.7500000
0 -1 0 0 0 -1 -1 0 0 0.5000000 0.2500000 0.7500000
0 1 0 1 0 0 0 0 -1 0.5000000 0.2500000 0.7500000
0 -1 0 -1 0 0 0 0 1 0.5000000 0.2500000 0.7500000
0 1 0 0 0 -1 -1 0 0 0.5000000 0.2500000 0.7500000
0 -1 0 0 0 1 1 0 0 0.5000000 0.2500000 0.7500000
0 1 0 -1 0 0 0 0 1 0.5000000 0.2500000 0.7500000
0 -1 0 1 0 0 0 0 -1 0.5000000 0.2500000 0.7500000
0 -1 0 0 0 -1 1 0 0 0.5000000 0.2500000 0.7500000
0 1 0 0 0 1 -1 0 0 0.5000000 0.2500000 0.7500000
0 -1 0 -1 0 0 0 0 -1 0.5000000 0.2500000 0.7500000
0 1 0 1 0 0 0 0 1 0.5000000 0.2500000 0.7500000
0 -1 0 0 0 1 -1 0 0 0.5000000 0.2500000 0.7500000
0 1 0 0 0 -1 1 0 0 0.5000000 0.2500000 0.7500000
0 -1 0 1 0 0 0 0 1 0.5000000 0.2500000 0.7500000
0 1 0 -1 0 0 0 0 -1 0.5000000 0.2500000 0.7500000
0 0 1 1 0 0 0 1 0 0.5000000 0.2500000 0.7500000
0 0 -1 -1 0 0 0 -1 0 0.5000000 0.2500000 0.7500000
1 0 0 0 0 -1 0 1 0 0.5000000 0.2500000 0.7500000
-1 0 0 0 0 1 0 -1 0 0.5000000 0.2500000 0.7500000
0 0 -1 -1 0 0 0 1 0 0.5000000 0.2500000 0.7500000
0 0 1 1 0 0 0 -1 0 0.5000000 0.2500000 0.7500000
-1 0 0 0 0 1 0 1 0 0.5000000 0.2500000 0.7500000
1 0 0 0 0 -1 0 -1 0 0.5000000 0.2500000 0.7500000
0 0 -1 1 0 0 0 -1 0 0.5000000 0.2500000 0.7500000
0 0 1 -1 0 0 0 1 0 0.5000000 0.2500000 0.7500000
-1 0 0 0 0 -1 0 -1 0 0.5000000 0.2500000 0.7500000
1 0 0 0 0 1 0 1 0 0.5000000 0.2500000 0.7500000
0 0 1 -1 0 0 0 -1 0 0.5000000 0.2500000 0.7500000
0 0 -1 1 0 0 0 1 0 0.5000000 0.2500000 0.7500000
1 0 0 0 0 1 0 -1 0 0.5000000 0.2500000 0.7500000
-1 0 0 0 0 -1 0 1 0 0.5000000 0.2500000 0.7500000
1 0 0 0 1 0 0 0 1 0.5000000 0.0000000 0.5000000
-1 0 0 0 -1 0 0 0 -1 0.5000000 0.0000000 0.5000000
0 0 -1 0 1 0 1 0 0 0.5000000 0.0000000 0.5000000
0 0 1 0 -1 0 -1 0 0 0.5000000 0.0000000 0.5000000
-1 0 0 0 1 0 0 0 -1 0.5000000 0.0000000 0.5000000
1 0 0 0 -1 0 0 0 1 0.5000000 0.0000000 0.5000000
0 0 1 0 1 0 -1 0 0 0.5000000 0.0000000 0.5000000
0 0 -1 0 -1 0 1 0 0 0.5000000 0.0000000 0.5000000
1 0 0 0 -1 0 0 0 -1 0.5000000 0.0000000 0.5000000
-1 0 0 0 1 0 0 0 1 0.5000000 0.0000000 0.5000000
0 0 -1 0 -1 0 -1 0 0 0.5000000 0.0000000 0.5000000
0 0 1 0 1 0 1 0 0 0.5000000 0.0000000 0.5000000
-1 0 0 0 -1 0 0 0 1 0.5000000 0.0000000 0.5000000
1 0 0 0 1 0 0 0 -1 0.5000000 0.0000000 0.5000000
0 0 1 0 -1 0 1 0 0 0.5000000 0.0000000 0.5000000
0 0 -1 0 1 0 -1 0 0 0.5000000 0.0000000 0.5000000
0 1 0 0 0 1 1 0 0 0.5000000 0.0000000 0.5000000
0 -1 0 0 0 -1 -1 0 0 0.5000000 0.0000000 0.5000000
0 1 0 1 0 0 0 0 -1 0.5000000 0.0000000 0.5000000
0 -1 0 -1 0 0 0 0 1 0.5000000 0.0000000 0.5000000
0 1 0 0 0 -1 -1 0 0 0.5000000 0.0000000 0.5000000
0 -1 0 0 0 1 1 0 0 0.5000000 0.0000000 0.5000000
0 1 0 -1 0 0 0 0 1 0.5000000 0.0000000 0.5000000
0 -1 0 1 0 0 0 0 -1 0.5000000 0.0000000 0.5000000
0 -1 0 0 0 -1 1 0 0 0.5000000 0.0000000 0.5000000
0 1 0 0 0 1 -1 0 0 0.5000000 0.0000000 0.5000000
0 -1 0 -1 0 0 0 0 -1 0.5000000 0.0000000 0.5000000
0 1 0 1 0 0 0 0 1 0.5000000 0.0000000 0.5000000
0 -1 0 0 0 1 -1 0 0 0.5000000 0.0000000 0.5000000
0 1 0 0 0 -1 1 0 0 0.5000000 0.0000000 0.5000000
0 -1 0 1 0 0 0 0 1 0.5000000 0.0000000 0.5000000
0 1 0 -1 0 0 0 0 -1 0.5000000 0.0000000 0.5000000
0 0 1 1 0 0 0 1 0 0.5000000 0.0000000 0.5000000
0 0 -1 -1 0 0 0 -1 0 0.5000000 0.0000000 0.5000000
1 0 0 0 0 -1 0 1 0 0.5000000 0.0000000 0.5000000
-1 0 0 0 0 1 0 -1 0 0.5000000 0.0000000 0.5000000
0 0 -1 -1 0 0 0 1 0 0.5000000 0.0000000 0.5000000
0 0 1 1 0 0 0 -1 0 0.5000000 0.0000000 0.5000000
-1 0 0 0 0 1 0 1 0 0.5000000 0.0000000 0.5000000
1 0 0 0 0 -1 0 -1 0 0.5000000 0.0000000 0.5000000
0 0 -1 1 0 0 0 -1 0 0.5000000 0.0000000 0.5000000
0 0 1 -1 0 0 0 1 0 0.5000000 0.0000000 0.5000000
-1 0 0 0 0 -1 0 -1 0 0.5000000 0.0000000 0.5000000
1 0 0 0 0 1 0 1 0 0.5000000 0.0000000 0.5000000
0 0 1 -1 0 0 0 -1 0 0.5000000 0.0000000 0.5000000
0 0 -1 1 0 0 0 1 0 0.5000000 0.0000000 0.5000000
1 0 0 0 0 1 0 -1 0 0.5000000 0.0000000 0.5000000
-1 0 0 0 0 -1 0 1 0 0.5000000 0.0000000 0.5000000
1 0 0 0 1 0 0 0 1 0.2500000 0.7500000 0.5000000
-1 0 0 0 -1 0 0 0 -1 0.2500000 0.7500000 0.5000000
0 0 -1 0 1 0 1 0 0 0.2500000 0.7500000 0.5000000
0 0 1 0 -1 0 -1 0 0 0.2500000 0.7500000 0.5000000
-1 0 0 0 1 0 0 0 -1 0.2500000 0.7500000 0.5000000
1 0 0 0 -1 0 0 0 1 0.2500000 0.7500000 0.5000000
0 0 1 0 1 0 -1 0 0 0.2500000 0.7500000 0.5000000
0 0 -1 0 -1 0 1 0 0 0.2500000 0.7500000 0.5000000
1 0 0 0 -1 0 0 0 -1 0.2500000 0.7500000 0.5000000
-1 0 0 0 1 0 0 0 1 0.2500000 0.7500000 0.5000000
0 0 -1 0 -1 0 -1 0 0 0.2500000 0.7500000 0.5000000
0 0 1 0 1 0 1 0 0 0.2500000 0.7500000 0.5000000
-1 0 0 0 -1 0 0 0 1 0.2500000 0.7500000 0.5000000
1 0 0 0 1 0 0 0 -1 0.2500000 0.7500000 0.5000000
0 0 1 0 -1 0 1 0 0 0.2500000 0.7500000 0.5000000
0 0 -1 0 1 0 -1 0 0 0.2500000 0.7500000 0.5000000
0 1 0 0 0 1 1 0 0 0.2500000 0.7500000 0.5000000
0 -1 0 0 0 -1 -1 0 0 0.2500000 0.7500000 0.5000000
0 1 0 1 0 0 0 0 -1 0.2500000 0.7500000 0.5000000
0 -1 0 -1 0 0 0 0 1 0.2500000 0.7500000 0.5000000
0 1 0 0 0 -1 -1 0 0 0.2500000 0.7500000 0.5000000
0 -1 0 0 0 1 1 0 0 0.2500000 0.7500000 0.5000000
0 1 0 -1 0 0 0 0 1 0.2500000 0.7500000 0.5000000
0 -1 0 1 0 0 0 0 -1 0.2500000 0.7500000 0.5000000
0 -1 0 0 0 -1 1 0 0 0.2500000 0.7500000 0.5000000
0 1 0 0 0 1 -1 0 0 0.2500000 0.7500000 0.5000000
0 -1 0 -1 0 0 0 0 -1 0.2500000 0.7500000 0.5000000
0 1 0 1 0 0 0 0 1 0.2500000 0.7500000 0.5000000
0 -1 0 0 0 1 -1 0 0 0.2500000 0.7500000 0.5000000
0 1 0 0 0 -1 1 0 0 0.2500000 0.7500000 0.5000000
0 -1 0 1 0 0 0 0 1 0.2500000 0.7500000 0.5000000
0 1 0 -1 0 0 0 0 -1 0.2500000 0.7500000 0.5000000
0 0 1 1 0 0 0 1 0 0.2500000 0.7500000 0.5000000
0 0 -1 -1 0 0 0 -1 0 0.2500000 0.7500000 0.5000000
1 0 0 0 0 -1 0 1 0 0.2500000 0.7500000 0.5000000
-1 0 0 0 0 1 0 -1 0 0.2500000 0.7500000 0.5000000
0 0 -1 -1 0 0 0 1 0 0.2500000 0.7500000 0.5000000
0 0 1 1 0 0 0 -1 0 0.2500000 0.7500000 0.5000000
-1 0 0 0 0 1 0 1 0 0.2500000 0.7500000 0.5000000
1 0 0 0 0 -1 0 -1 0 0.2500000 0.7500000 0.5000000
0 0 -1 1 0 0 0 -1 0 0.2500000 0.7500000 0.5000000
0 0 1 -1 0 0 0 1 0 0.2500000 0.7500000 0.5000000
-1 0 0 0 0 -1 0 -1 0 0.2500000 0.7500000 0.5000000
1 0 0 0 0 1 0 1 0 0.2500000 0.7500000 0.5000000
0 0 1 -1 0 0 0 -1 0 0.2500000 0.7500000 0.5000000
0 0 -1 1 0 0 0 1 0 0.2500000 0.7500000 0.5000000
1 0 0 0 0 1 0 -1 0 0.2500000 0.7500000 0.5000000
-1 0 0 0 0 -1 0 1 0 0.2500000 0.7500000 0.5000000
1 0 0 0 1 0 0 0 1 0.0000000 0.5000000 0.5000000
-1 0 0 0 -1 0 0 0 -1 0.0000000 0.5000000 0.5000000
0 0 -1 0 1 0 1 0 0 0.0000000 0.5000000 0.5000000
0 0 1 0 -1 0 -1 0 0 0.0000000 0.5000000 0.5000000
-1 0 0 0 1 0 0 0 -1 0.0000000 0.5000000 0.5000000
1 0 0 0 -1 0 0 0 1 0.0000000 0.5000000 0.5000000
0 0 1 0 1 0 -1 0 0 0.0000000 0.5000000 0.5000000
0 0 -1 0 -1 0 1 0 0 0.0000000 0.5000000 0.5000000
1 0 0 0 -1 0 0 0 -1 0.0000000 0.5000000 0.5000000
-1 0 0 0 1 0 0 0 1 0.0000000 0.5000000 0.5000000
0 0 -1 0 -1 0 -1 0 0 0.0000000 0.5000000 0.5000000
0 0 1 0 1 0 1 0 0 0.0000000 0.5000000 0.5000000
-1 0 0 0 -1 0 0 0 1 0.0000000 0.5000000 0.5000000
1 0 0 0 1 0 0 0 -1 0.0000000 0.5000000 0.5000000
0 0 1 0 -1 0 1 0 0 0.0000000 0.5000000 0.5000000
0 0 -1 0 1 0 -1 0 0 0.0000000 0.5000000 0.5000000
0 1 0 0 0 1 1 0 0 0.0000000 0.5000000 0.5000000
0 -1 0 0 0 -1 -1 0 0 0.0000000 0.5000000 0.5000000
0 1 0 1 0 0 0 0 -1 0.0000000 0.5000000 0.5000000
0 -1 0 -1 0 0 0 0 1 0.0000000 0.5000000 0.5000000
0 1 0 0 0 -1 -1 0 0 0.0000000 0.5000000 0.5000000
0 -1 0 0 0 1 1 0 0 0.0000000 0.5000000 0.5000000
0 1 0 -1 0 0 0 0 1 0.0000000 0.5000000 0.5000000
0 -1 0 1 0 0 0 0 -1 0.0000000 0.5000000 0.5000000
0 -1 0 0 0 -1 1 0 0 0.0000000 0.5000000 0.5000000
0 1 0 0 0 1 -1 0 0 0.0000000 0.5000000 0.5000000
0 -1 0 -1 0 0 0 0 -1 0.0000000 0.5000000 0.5000000
0 1 0 1 0 0 0 0 1 0.0000000 0.5000000 0.5000000
0 -1 0 0 0 1 -1 0 0 0.0000000 0.5000000 0.5000000
0 1 0 0 0 -1 1 0 0 0.0000000 0.5000000 0.5000000
0 -1 0 1 0 0 0 0 1 0.0000000 0.5000000 0.5000000
0 1 0 -1 0 0 0 0 -1 0.0000000 0.5000000 0.5000000
0 0 1 1 0 0 0 1 0 0.0000000 0.5000000 0.5000000
0 0 -1 -1 0 0 0 -1 0 0.0000000 0.5000000 0.5000000
1 0 0 0 0 -1 0 1 0 0.0000000 0.5000000 0.5000000
-1 0 0 0 0 1 0 -1 0 0.0000000 0.5000000 0.5000000
0 0 -1 -1 0 0 0 1 0 0.0000000 0.5000000 0.5000000
0 0 1 1 0 0 0 -1 0 0.0000000 0.5000000 0.5000000
-1 0 0 0 0 1 0 1 0 0.0000000 0.5000000 0.5000000
1 0 0 0 0 -1 0 -1 0 0.0000000 0.5000000 0.5000000
0 0 -1 1 0 0 0 -1 0 0.0000000 0.5000000 0.5000000
0 0 1 -1 0 0 0 1 0 0.0000000 0.5000000 0.5000000
-1 0 0 0 0 -1 0 -1 0 0.0000000 0.5000000 0.5000000
1 0 0 0 0 1 0 1 0 0.0000000 0.5000000 0.5000000
0 0 1 -1 0 0 0 -1 0 0.0000000 0.5000000 0.5000000
0 0 -1 1 0 0 0 1 0 0.0000000 0.5000000 0.5000000
1 0 0 0 0 1 0 -1 0 0.0000000 0.5000000 0.5000000
-1 0 0 0 0 -1 0 1 0 0.0000000 0.5000000 0.5000000
1 0 0 0 1 0 0 0 1 0.7500000 0.2500000 0.5000000
-1 0 0 0 -1 0 0 0 -1 0.7500000 0.2500000 0.5000000
0 0 -1 0 1 0 1 0 0 0.7500000 0.2500000 0.5000000
0 0 1 0 -1 0 -1 0 0 0.7500000 0.2500000 0.5000000
-1 0 0 0 1 0 0 0 -1 0.7500000 0.2500000 0.5000000
1 0 0 0 -1 0 0 0 1 0.7500000 0.2500000 0.5000000
0 0 1 0 1 0 -1 0 0 0.7500000 0.2500000 0.5000000
0 0 -1 0 -1 0 1 0 0 0.7500000 0.2500000 0.5000000
1 0 0 0 -1 0 0 0 -1 0.7500000 0.2500000 0.5000000
-1 0 0 0 1 0 0 0 1 0.7500000 0.2500000 0.5000000
0 0 -1 0 -1 0 -1 0 0 0.7500000 0.2500000 0.5000000
0 0 1 0 1 0 1 0 0 0.7500000 0.2500000 0.5000000
-1 0 0 0 -1 0 0 0 1 0.7500000 0.2500000 0.5000000
1 0 0 0 1 0 0 0 -1 0.7500000 0.2500000 0.5000000
0 0 1 0 -1 0 1 0 0 0.7500000 0.2500000 0.5000000
0 0 -1 0 1 0 -1 0 0 0.7500000 0.2500000 0.5000000
0 1 0 0 0 1 1 0 0 0.7500000 0.2500000 0.5000000
0 -1 0 0 0 -1 -1 0 0 0.7500000 0.2500000 0.5000000
0 1 0 1 0 0 0 0 -1 0.7500000 0.2500000 0.5000000
0 -1 0 -1 0 0 0 0 1 0.7500000 0.2500000 0.5000000
0 1 0 0 0 -1 -1 0 0 0.7500000 0.2500000 0.5000000
0 -1 0 0 0 1 1 0 0 0.7500000 0.2500000 0.5000000
0 1 0 -1 0 0 0 0 1 0.7500000 0.2500000 0.5000000
0 -1 0 1 0 0 0 0 -1 0.7500000 0.2500000 0.5000000
0 -1 0 0 0 -1 1 0 0 0.7500000 0.2500000 0.5000000
0 1 0 0 0 1 -1 0 0 0.7500000 0.2500000 0.5000000
0 -1 0 -1 0 0 0 0 -1 0.7500000 0.2500000 0.5000000
0 1 0 1 0 0 0 0 1 0.7500000 0.2500000 0.5000000
0 -1 0 0 0 1 -1 0 0 0.7500000 0.2500000 0.5000000
0 1 0 0 0 -1 1 0 0 0.7500000 0.2500000 0.5000000
0 -1 0 1 0 0 0 0 1 0.7500000 0.2500000 0.5000000
0 1 0 -1 0 0 0 0 -1 0.7500000 0.2500000 0.5000000
0 0 1 1 0 0 0 1 0 0.7500000 0.2500000 0.5000000
0 0 -1 -1 0 0 0 -1 0 0.7500000 0.2500000 0.5000000
1 0 0 0 0 -1 0 1 0 0.7500000 0.2500000 0.5000000
-1 0 0 0 0 1 0 -1 0 0.7500000 0.2500000 0.5000000
0 0 -1 -1 0 0 0 1 0 0.7500000 0.2500000 0.5000000
0 0 1 1 0 0 0 -1 0 0.7500000 0.2500000 0.5000000
-1 0 0 0 0 1 0 1 0 0.7500000 0.2500000 0.5000000
1 0 0 0 0 -1 0 -1 0 0.7500000 0.2500000 0.5000000
0 0 -1 1 0 0 0 -1 0 0.7500000 0.2500000 0.5000000
0 0 1 -1 0 0 0 1 0 0.7500000 0.2500000 0.5000000
-1 0 0 0 0 -1 0 -1 0 0.7500000 0.2500000 0.5000000
1 0 0 0 0 1 0 1 0 0.7500000 0.2500000 0.5000000
0 0 1 -1 0 0 0 -1 0 0.7500000 0.2500000 0.5000000
0 0 -1 1 0 0 0 1 0 0.7500000 0.2500000 0.5000000
1 0 0 0 0 1 0 -1 0 0.7500000 0.2500000 0.5000000
-1 0 0 0 0 -1 0 1 0 0.7500000 0.2500000 0.5000000
1 0 0 0 1 0 0 0 1 0.7500000 0.0000000 0.2500000
-1 0 0 0 -1 0 0 0 -1 0.7500000 0.0000000 0.2500000
0 0 -1 0 1 0 1 0 0 0.7500000 0.0000000 0.2500000
0 0 1 0 -1 0 -1 0 0 0.7500000 0.0000000 0.2500000
-1 0 0 0 1 0 0 0 -1 0.7500000 0.0000000 0.2500000
1 0 0 0 -1 0 0 0 1 0.7500000 0.0000000 0.2500000
0 0 1 0 1 0 -1 0 0 0.7500000 0.0000000 0.2500000
0 0 -1 0 -1 0 1 0 0 0.7500000 0.0000000 0.2500000
1 0 0 0 -1 0 0 0 -1 0.7500000 0.0000000 0.2500000
-1 0 0 0 1 0 0 0 1 0.7500000 0.0000000 0.2500000
0 0 -1 0 -1 0 -1 0 0 0.7500000 0.0000000 0.2500000
0 0 1 0 1 0 1 0 0 0.7500000 0.0000000 0.2500000
-1 0 0 0 -1 0 0 0 1 0.7500000 0.0000000 0.2500000
1 0 0 0 1 0 0 0 -1 0.7500000 0.0000000 0.2500000
0 0 1 0 -1 0 1 0 0 0.7500000 0.0000000 0.2500000
0 0 -1 0 1 0 -1 0 0 0.7500000 0.0000000 0.2500000
0 1 0 0 0 1 1 0 0 0.7500000 0.0000000 0.2500000
0 -1 0 0 0 -1 -1 0 0 0.7500000 0.0000000 0.2500000
0 1 0 1 0 0 0 0 -1 0.7500000 0.0000000 0.2500000
0 -1 0 -1 0 0 0 0 1 0.7500000 0.0000000 0.2500000
0 1 0 0 0 -1 -1 0 0 0.7500000 0.0000000 0.2500000
0 -1 0 0 0 1 1 0 0 0.7500000 0.0000000 0.2500000
0 1 0 -1 0 0 0 0 1 0.7500000 0.0000000 0.2500000
0 -1 0 1 0 0 0 0 -1 0.7500000 0.0000000 0.2500000
0 -1 0 0 0 -1 1 0 0 0.7500000 0.0000000 0.2500000
0 1 0 0 0 1 -1 0 0 0.7500000 0.0000000 0.2500000
0 -1 0 -1 0 0 0 0 -1 0.7500000 0.0000000 0.2500000
0 1 0 1 0 0 0 0 1 0.7500000 0.0000000 0.2500000
0 -1 0 0 0 1 -1 0 0 0.7500000 0.0000000 0.2500000
0 1 0 0 0 -1 1 0 0 0.7500000 0.0000000 0.2500000
0 -1 0 1 0 0 0 0 1 0.7500000 0.0000000 0.2500000
0 1 0 -1 0 0 0 0 -1 0.7500000 0.0000000 0.2500000
0 0 1 1 0 0 0 1 0 0.7500000 0.0000000 0.2500000
0 0 -1 -1 0 0 0 -1 0 0.7500000 0.0000000 0.2500000
1 0 0 0 0 -1 0 1 0 0.7500000 0.0000000 0.2500000
-1 0 0 0 0 1 0 -1 0 0.7500000 0.0000000 0.2500000
0 0 -1 -1 0 0 0 1 0 0.7500000 0.0000000 0.2500000
0 0 1 1 0 0 0 -1 0 0.7500000 0.0000000 0.2500000
-1 0 0 0 0 1 0 1 0 0.7500000 0.0000000 0.2500000
1 0 0 0 0 -1 0 -1 0 0.7500000 0.0000000 0.2500000
0 0 -1 1 0 0 0 -1 0 0.7500000 0.0000000 0.2500000
0 0 1 -1 0 0 0 1 0 0.7500000 0.0000000 0.2500000
-1 0 0 0 0 -1 0 -1 0 0.7500000 0.0000000 0.2500000
1 0 0 0 0 1 0 1 0 0.7500000 0.0000000 0.2500000
0 0 1 -1 0 0 0 -1 0 0.7500000 0.0000000 0.2500000
0 0 -1 1 0 0 0 1 0 0.7500000 0.0000000 0.2500000
1 0 0 0 0 1 0 -1 0 0.7500000 0.0000000 0.2500000
-1 0 0 0 0 -1 0 1 0 0.7500000 0.0000000 0.2500000
1 0 0 0 1 0 0 0 1 0.5000000 0.7500000 0.2500000
-1 0 0 0 -1 0 0 0 -1 0.5000000 0.7500000 0.2500000
0 0 -1 0 1 0 1 0 0 0.5000000 0.7500000 0.2500000
0 0 1 0 -1 0 -1 0 0 0.5000000 0.7500000 0.2500000
-1 0 0 0 1 0 0 0 -1 0.5000000 0.7500000 0.2500000
1 0 0 0 -1 0 0 0 1 0.5000000 0.7500000 0.2500000
0 0 1 0 1 0 -1 0 0 0.5000000 0.7500000 0.2500000
0 0 -1 0 -1 0 1 0 0 0.5000000 0.7500000 0.2500000
1 0 0 0 -1 0 0 0 -1 0.5000000 0.7500000 0.2500000
-1 0 0 0 1 0 0 0 1 0.5000000 0.7500000 0.2500000
0 0 -1 0 -1 0 -1 0 0 0.5000000 0.7500000 0.2500000
0 0 1 0 1 0 1 0 0 0.5000000 0.7500000 0.2500000
-1 0 0 0 -1 0 0 0 1 0.5000000 0.7500000 0.2500000
1 0 0 0 1 0 0 0 -1 0.5000000 0.7500000 0.2500000
0 0 1 0 -1 0 1 0 0 0.5000000 0.7500000 0.2500000
0 0 -1 0 1 0 -1 0 0 0.5000000 0.7500000 0.2500000
0 1 0 0 0 1 1 0 0 0.5000000 0.7500000 0.2500000
0 -1 0 0 0 -1 -1 0 0 0.5000000 0.7500000 0.2500000
0 1 0 1 0 0 0 0 -1 0.5000000 0.7500000 0.2500000
0 -1 0 -1 0 0 0 0 1 0.5000000 0.7500000 0.2500000
0 1 0 0 0 -1 -1 0 0 0.5000000 0.7500000 0.2500000
0 -1 0 0 0 1 1 0 0 0.5000000 0.7500000 0.2500000
0 1 0 -1 0 0 0 0 1 0.5000000 0.7500000 0.2500000
0 -1 0 1 0 0 0 0 -1 0.5000000 0.7500000 0.2500000
0 -1 0 0 0 -1 1 0 0 0.5000000 0.7500000 0.2500000
0 1 0 0 0 1 -1 0 0 0.5000000 0.7500000 0.2500000
0 -1 0 -1 0 0 0 0 -1 0.5000000 0.7500000 0.2500000
0 1 0 1 0 0 0 0 1 0.5000000 0.7500000 0.2500000
0 -1 0 0 0 1 -1 0 0 0.5000000 0.7500000 0.2500000
0 1 0 0 0 -1 1 0 0 0.5000000 0.7500000 0.2500000
0 -1 0 1 0 0 0 0 1 0.5000000 0.7500000 0.2500000
0 1 0 -1 0 0 0 0 -1 0.5000000 0.7500000 0.2500000
0 0 1 1 0 0 0 1 0 0.5000000 0.7500000 0.2500000
0 0 -1 -1 0 0 0 -1 0 0.5000000 0.7500000 0.2500000
1 0 0 0 0 -1 0 1 0 0.5000000 0.7500000 0.2500000
-1 0 0 0 0 1 0 -1 0 0.5000000 0.7500000 0.2500000
0 0 -1 -1 0 0 0 1 0 0.5000000 0.7500000 0.2500000
0 0 1 1 0 0 0 -1 0 0.5000000 0.7500000 0.2500000
-1 0 0 0 0 1 0 1 0 0.5000000 0.7500000 0.2500000
1 0 0 0 0 -1 0 -1 0 0.5000000 0.7500000 0.2500000
0 0 -1 1 0 0 0 -1 0 0.5000000 0.7500000 0.2500000
0 0 1 -1 0 0 0 1 0 0.5000000 0.7500000 0.2500000
-1 0 0 0 0 -1 0 -1 0 0.5000000 0.7500000 0.2500000
1 0 0 0 0 1 0 1 0 0.5000000 0.7500000 0.2500000
0 0 1 -1 0 0 0 -1 0 0.5000000 0.7500000 0.2500000
0 0 -1 1 0 0 0 1 0 0.5000000 0.7500000 0.2500000
1 0 0 0 0 1 0 -1 0 0.5000000 0.7500000 0.2500000
-1 0 0 0 0 -1 0 1 0 0.5000000 0.7500000 0.2500000
1 0 0 0 1 0 0 0 1 0.2500000 0.5000000 0.2500000
-1 0 0 0 -1 0 0 0 -1 0.2500000 0.5000000 0.2500000
0 0 -1 0 1 0 1 0 0 0.2500000 0.5000000 0.2500000
0 0 1 0 -1 0 -1 0 0 0.2500000 0.5000000 0.2500000
-1 0 0 0 1 0 0 0 -1 0.2500000 0.5000000 0.2500000
1 0 0 0 -1 0 0 0 1 0.2500000 0.5000000 0.2500000
0 0 1 0 1 0 -1 0 0 0.2500000 0.5000000 0.2500000
0 0 -1 0 -1 0 1 0 0 0.2500000 0.5000000 0.2500000
1 0 0 0 -1 0 0 0 -1 0.2500000 0.5000000 0.2500000
-1 0 0 0 1 0 0 0 1 0.2500000 0.5000000 0.2500000
0 0 -1 0 -1 0 -1 0 0 0.2500000 0.5000000 0.2500000
0 0 1 0 1 0 1 0 0 0.2500000 0.5000000 0.2500000
-1 0 0 0 -1 0 0 0 1 0.2500000 0.5000000 0.2500000
1 0 0 0 1 0 0 0 -1 0.2500000 0.5000000 0.2500000
0 0 1 0 -1 0 1 0 0 0.2500000 0.5000000 0.2500000
0 0 -1 0 1 0 -1 0 0 0.2500000 0.5000000 0.2500000
0 1 0 0 0 1 1 0 0 0.2500000 0.5000000 0.2500000
0 -1 0 0 0 -1 -1 0 0 0.2500000 0.5000000 0.2500000
0 1 0 1 0 0 0 0 -1 0.2500000 0.5000000 0.2500000
0 -1 0 -1 0 0 0 0 1 0.2500000 0.5000000 0.2500000
0 1 0 0 0 -1 -1 0 0 0.2500000 0.5000000 0.2500000
0 -1 0 0 0 1 1 0 0 0.2500000 0.5000000 0.2500000
0 1 0 -1 0 0 0 0 1 0.2500000 0.5000000 0.2500000
0 -1 0 1 0 0 0 0 -1 0.2500000 0.5000000 0.2500000
0 -1 0 0 0 -1 1 0 0 0.2500000 0.5000000 0.2500000
0 1 0 0 0 1 -1 0 0 0.2500000 0.5000000 0.2500000
0 -1 0 -1 0 0 0 0 -1 0.2500000 0.5000000 0.2500000
0 1 0 1 0 0 0 0 1 0.2500000 0.5000000 0.2500000
0 -1 0 0 0 1 -1 0 0 0.2500000 0.5000000 0.2500000
0 1 0 0 0 -1 1 0 0 0.2500000 0.5000000 0.2500000
0 -1 0 1 0 0 0 0 1 0.2500000 0.5000000 0.2500000
0 1 0 -1 0 0 0 0 -1 0.2500000 0.5000000 0.2500000
0 0 1 1 0 0 0 1 0 0.2500000 0.5000000 0.2500000
0 0 -1 -1 0 0 0 -1 0 0.2500000 0.5000000 0.2500000
1 0 0 0 0 -1 0 1 0 0.2500000 0.5000000 0.2500000
-1 0 0 0 0 1 0 -1 0 0.2500000 0.5000000 0.2500000
0 0 -1 -1 0 0 0 1 0 0.2500000 0.5000000 0.2500000
0 0 1 1 0 0 0 -1 0 0.2500000 0.5000000 0.2500000
-1 0 0 0 0 1 0 1 0 0.2500000 0.5000000 0.2500000
1 0 0 0 0 -1 0 -1 0 0.2500000 0.5000000 0.2500000
0 0 -1 1 0 0 0 -1 0 0.2500000 0.5000000 0.2500000
0 0 1 -1 0 0 0 1 0 0.2500000 0.5000000 0.2500000
-1 0 0 0 0 -1 0 -1 0 0.2500000 0.5000000 0.2500000
1 0 0 0 0 1 0 1 0 0.2500000 0.5000000 0.2500000
0 0 1 -1 0 0 0 -1 0 0.2500000 0.5000000 0.2500000
0 0 -1 1 0 0 0 1 0 0.2500000 0.5000000 0.2500000
1 0 0 0 0 1 0 -1 0 0.2500000 0.5000000 0.2500000
-1 0 0 0 0 -1 0 1 0 0.2500000 0.5000000 0.2500000
1 0 0 0 1 0 0 0 1 0.0000000 0.2500000 0.2500000
-1 0 0 0 -1 0 0 0 -1 0.0000000 0.2500000 0.2500000
0 0 -1 0 1 0 1 0 0 0.0000000 0.2500000 0.2500000
0 0 1 0 -1 0 -1 0 0 0.0000000 0.2500000 0.2500000
-1 0 0 0 1 0 0 0 -1 0.0000000 0.2500000 0.2500000
1 0 0 0 -1 0 0 0 1 0.0000000 0.2500000 0.2500000
0 0 1 0 1 0 -1 0 0 0.0000000 0.2500000 0.2500000
0 0 -1 0 -1 0 1 0 0 0.0000000 0.2500000 0.2500000
1 0 0 0 -1 0 0 0 -1 0.0000000 0.2500000 0.2500000
-1 0 0 0 1 0 0 0 1 0.0000000 0.2500000 0.2500000
0 0 -1 0 -1 0 -1 0 0 0.0000000 0.2500000 0.2500000
0 0 1 0 1 0 1 0 0 0.0000000 0.2500000 0.2500000
-1 0 0 0 -1 0 0 0 1 0.0000000 0.2500000 0.2500000
1 0 0 0 1 0 0 0 -1 0.0000000 0.2500000 0.2500000
0 0 1 0 -1 0 1 0 0 0.0000000 0.2500000 0.2500000
0 0 -1 0 1 0 -1 0 0 0.0000000 0.2500000 0.2500000
0 1 0 0 0 1 1 0 0 0.0000000 0.2500000 0.2500000
0 -1 0 0 0 -1 -1 0 0 0.0000000 0.2500000 0.2500000
0 1 0 1 0 0 0 0 -1 0.0000000 0.2500000 0.2500000
0 -1 0 -1 0 0 0 0 1 0.0000000 0.2500000 0.2500000
0 1 0 0 0 -1 -1 0 0 0.0000000 0.2500000 0.2500000
0 -1 0 0 0 1 1 0 0 0.0000000 0.2500000 0.2500000
0 1 0 -1 0 0 0 0 1 0.0000000 0.2500000 0.2500000
0 -1 0 1 0 0 0 0 -1 0.0000000 0.2500000 0.2500000
0 -1 0 0 0 -1 1 0 0 0.0000000 0.2500000 0.2500000
0 1 0 0 0 1 -1 0 0 0.0000000 0.2500000 0.2500000
0 -1 0 -1 0 0 0 0 -1 0.0000000 0.2500000 0.2500000
0 1 0 1 0 0 0 0 1 0.0000000 0.2500000 0.2500000
0 -1 0 0 0 1 -1 0 0 0.0000000 0.2500000 0.2500000
0 1 0 0 0 -1 1 0 0 0.0000000 0.2500000 0.2500000
0 -1 0 1 0 0 0 0 1 0.0000000 0.2500000 0.2500000
0 1 0 -1 0 0 0 0 -1 0.0000000 0.2500000 0.2500000
0 0 1 1 0 0 0 1 0 0.0000000 0.2500000 0.2500000
0 0 -1 -1 0 0 0 -1 0 0.0000000 0.2500000 0.2500000
1 0 0 0 0 -1 0 1 0 0.0000000 0.2500000 0.2500000
-1 0 0 0 0 1 0 -1 0 0.0000000 0.2500000 0.2500000
0 0 -1 -1 0 0 0 1 0 0.0000000 0.2500000 0.2500000
0 0 1 1 0 0 0 -1 0 0.0000000 0.2500000 0.2500000
-1 0 0 0 0 1 0 1 0 0.0000000 0.2500000 0.2500000
1 0 0 0 0 -1 0 -1 0 0.0000000 0.2500000 0.2500000
0 0 -1 1 0 0 0 -1 0 0.0000000 0.2500000 0.2500000
0 0 1 -1 0 0 0 1 0 0.0000000 0.2500000 0.2500000
-1 0 0 0 0 -1 0 -1 0 0.0000000 0.2500000 0.2500000
1 0 0 0 0 1 0 1 0 0.0000000 0.2500000 0.2500000
0 0 1 -1 0 0 0 -1 0 0.0000000 0.2500000 0.2500000
0 0 -1 1 0 0 0 1 0 0.0000000 0.2500000 0.2500000
1 0 0 0 0 1 0 -1 0 0.0000000 0.2500000 0.2500000
-1 0 0 0 0 -1 0 1 0 0.0000000 0.2500000 0.2500000
1 0 0 0 1 0 0 0 1 0.7500000 0.2500000 0.0000000
-1 0 0 0 -1 0 0 0 -1 0.7500000 0.2500000 0.0000000
0 0 -1 0 1 0 1 0 0 0.7500000 0.2500000 0.0000000
0 0 1 0 -1 0 -1 0 0 0.7500000 0.2500000 0.0000000
-1 0 0 0 1 0 0 0 -1 0.7500000 0.2500000 0.0000000
1 0 0 0 -1 0 0 0 1 0.7500000 0.2500000 0.0000000
0 0 1 0 1 0 -1 0 0 0.7500000 0.2500000 0.0000000
0 0 -1 0 -1 0 1 0 0 0.7500000 0.2500000 0.0000000
1 0 0 0 -1 0 0 0 -1 0.7500000 0.2500000 0.0000000
-1 0 0 0 1 0 0 0 1 0.7500000 0.2500000 0.0000000
0 0 -1 0 -1 0 -1 0 0 0.7500000 0.2500000 0.0000000
0 0 1 0 1 0 1 0 0 0.7500000 0.2500000 0.0000000
-1 0 0 0 -1 0 0 0 1 0.7500000 0.2500000 0.0000000
1 0 0 0 1 0 0 0 -1 0.7500000 0.2500000 0.0000000
0 0 1 0 -1 0 1 0 0 0.7500000 0.2500000 0.0000000
0 0 -1 0 1 0 -1 0 0 0.7500000 0.2500000 0.0000000
0 1 0 0 0 1 1 0 0 0.7500000 0.2500000 0.0000000
0 -1 0 0 0 -1 -1 0 0 0.7500000 0.2500000 0.0000000
0 1 0 1 0 0 0 0 -1 0.7500000 0.2500000 0.0000000
0 -1 0 -1 0 0 0 0 1 0.7500000 0.2500000 0.0000000
0 1 0 0 0 -1 -1 0 0 0.7500000 0.2500000 0.0000000
0 -1 0 0 0 1 1 0 0 0.7500000 0.2500000 0.0000000
0 1 0 -1 0 0 0 0 1 0.7500000 0.2500000 0.0000000
0 -1 0 1 0 0 0 0 -1 0.7500000 0.2500000 0.0000000
0 -1 0 0 0 -1 1 0 0 0.7500000 0.2500000 0.0000000
0 1 0 0 0 1 -1 0 0 0.7500000 0.2500000 0.0000000
0 -1 0 -1 0 0 0 0 -1 0.7500000 0.2500000 0.0000000
0 1 0 1 0 0 0 0 1 0.7500000 0.2500000 0.0000000
0 -1 0 0 0 1 -1 0 0 0.7500000 0.2500000 0.0000000
0 1 0 0 0 -1 1 0 0 0.7500000 0.2500000 0.0000000
0 -1 0 1 0 0 0 0 1 0.7500000 0.2500000 0.0000000
0 1 0 -1 0 0 0 0 -1 0.7500000 0.2500000 0.0000000
0 0 1 1 0 0 0 1 0 0.7500000 0.2500000 0.0000000
0 0 -1 -1 0 0 0 -1 0 0.7500000 0.2500000 0.0000000
1 0 0 0 0 -1 0 1 0 0.7500000 0.2500000 0.0000000
-1 0 0 0 0 1 0 -1 0 0.7500000 0.2500000 0.0000000
0 0 -1 -1 0 0 0 1 0 0.7500000 0.2500000 0.0000000
0 0 1 1 0 0 0 -1 0 0.7500000 0.2500000 0.0000000
-1 0 0 0 0 1 0 1 0 0.7500000 0.2500000 0.0000000
1 0 0 0 0 -1 0 -1 0 0.7500000 0.2500000 0.0000000
0 0 -1 1 0 0 0 -1 0 0.7500000 0.2500000 0.0000000
0 0 1 -1 0 0 0 1 0 0.7500000 0.2500000 0.0000000
-1 0 0 0 0 -1 0 -1 0 0.7500000 0.2500000 0.0000000
1 0 0 0 0 1 0 1 0 0.7500000 0.2500000 0.0000000
0 0 1 -1 0 0 0 -1 0 0.7500000 0.2500000 0.0000000
0 0 -1 1 0 0 0 1 0 0.7500000 0.2500000 0.0000000
1 0 0 0 0 1 0 -1 0 0.7500000 0.2500000 0.0000000
-1 0 0 0 0 -1 0 1 0 0.7500000 0.2500000 0.0000000
1 0 0 0 1 0 0 0 1 0.5000000 0.0000000 0.0000000
-1 0 0 0 -1 0 0 0 -1 0.5000000 0.0000000 0.0000000
0 0 -1 0 1 0 1 0 0 0.5000000 0.0000000 0.0000000
0 0 1 0 -1 0 -1 0 0 0.5000000 0.0000000 0.0000000
-1 0 0 0 1 0 0 0 -1 0.5000000 0.0000000 0.0000000
1 0 0 0 -1 0 0 0 1 0.5000000 0.0000000 0.0000000
0 0 1 0 1 0 -1 0 0 0.5000000 0.0000000 0.0000000
0 0 -1 0 -1 0 1 0 0 0.5000000 0.0000000 0.0000000
1 0 0 0 -1 0 0 0 -1 0.5000000 0.0000000 0.0000000
-1 0 0 0 1 0 0 0 1 0.5000000 0.0000000 0.0000000
0 0 -1 0 -1 0 -1 0 0 0.5000000 0.0000000 0.0000000
0 0 1 0 1 0 1 0 0 0.5000000 0.0000000 0.0000000
-1 0 0 0 -1 0 0 0 1 0.5000000 0.0000000 0.0000000
1 0 0 0 1 0 0 0 -1 0.5000000 0.0000000 0.0000000
0 0 1 0 -1 0 1 0 0 0.5000000 0.0000000 0.0000000
0 0 -1 0 1 0 -1 0 0 0.5000000 0.0000000 0.0000000
0 1 0 0 0 1 1 0 0 0.5000000 0.0000000 0.0000000
0 -1 0 0 0 -1 -1 0 0 0.5000000 0.0000000 0.0000000
0 1 0 1 0 0 0 0 -1 0.5000000 0.0000000 0.0000000
0 -1 0 -1 0 0 0 0 1 0.5000000 0.0000000 0.0000000
0 1 0 0 0 -1 -1 0 0 0.5000000 0.0000000 0.0000000
0 -1 0 0 0 1 1 0 0 0.5000000 0.0000000 0.0000000
0 1 0 -1 0 0 0 0 1 0.5000000 0.0000000 0.0000000
0 -1 0 1 0 0 0 0 -1 0.5000000 0.0000000 0.0000000
0 -1 0 0 0 -1 1 0 0 0.5000000 0.0000000 0.0000000
0 1 0 0 0 1 -1 0 0 0.5000000 0.0000000 0.0000000
0 -1 0 -1 0 0 0 0 -1 0.5000000 0.0000000 0.0000000
0 1 0 1 0 0 0 0 1 0.5000000 0.0000000 0.0000000
0 -1 0 0 0 1 -1 0 0 0.5000000 0.0000000 0.0000000
0 1 0 0 0 -1 1 0 0 0.5000000 0.0000000 0.0000000
0 -1 0 1 0 0 0 0 1 0.5000000 0.0000000 0.0000000
0 1 0 -1 0 0 0 0 -1 0.5000000 0.0000000 0.0000000
0 0 1 1 0 0 0 1 0 0.5000000 0.0000000 0.0000000
0 0 -1 -1 0 0 0 -1 0 0.5000000 0.0000000 0.0000000
1 0 0 0 0 -1 0 1 0 0.5000000 0.0000000 0.0000000
-1 0 0 0 0 1 0 -1 0 0.5000000 0.0000000 0.0000000
0 0 -1 -1 0 0 0 1 0 0.5000000 0.0000000 0.0000000
0 0 1 1 0 0 0 -1 0 0.5000000 0.0000000 0.0000000
-1 0 0 0 0 1 0 1 0 0.5000000 0.0000000 0.0000000
1 0 0 0 0 -1 0 -1 0 0.5000000 0.0000000 0.0000000
0 0 -1 1 0 0 0 -1 0 0.5000000 0.0000000 0.0000000
0 0 1 -1 0 0 0 1 0 0.5000000 0.0000000 0.0000000
-1 0 0 0 0 -1 0 -1 0 0.5000000 0.0000000 0.0000000
1 0 0 0 0 1 0 1 0 0.5000000 0.0000000 0.0000000
0 0 1 -1 0 0 0 -1 0 0.5000000 0.0000000 0.0000000
0 0 -1 1 0 0 0 1 0 0.5000000 0.0000000 0.0000000
1 0 0 0 0 1 0 -1 0 0.5000000 0.0000000 0.0000000
-1 0 0 0 0 -1 0 1 0 0.5000000 0.0000000 0.0000000
1 0 0 0 1 0 0 0 1 0.2500000 0.7500000 0.0000000
-1 0 0 0 -1 0 0 0 -1 0.2500000 0.7500000 0.0000000
0 0 -1 0 1 0 1 0 0 0.2500000 0.7500000 0.0000000
0 0 1 0 -1 0 -1 0 0 0.2500000 0.7500000 0.0000000
-1 0 0 0 1 0 0 0 -1 0.2500000 0.7500000 0.0000000
1 0 0 0 -1 0 0 0 1 0.2500000 0.7500000 0.0000000
0 0 1 0 1 0 -1 0 0 0.2500000 0.7500000 0.0000000
0 0 -1 0 -1 0 1 0 0 0.2500000 0.7500000 0.0000000
1 0 0 0 -1 0 0 0 -1 0.2500000 0.7500000 0.0000000
-1 0 0 0 1 0 0 0 1 0.2500000 0.7500000 0.0000000
0 0 -1 0 -1 0 -1 0 0 0.2500000 0.7500000 0.0000000
0 0 1 0 1 0 1 0 0 0.2500000 0.7500000 0.0000000
-1 0 0 0 -1 0 0 0 1 0.2500000 0.7500000 0.0000000
1 0 0 0 1 0 0 0 -1 0.2500000 0.7500000 0.0000000
0 0 1 0 -1 0 1 0 0 0.2500000 0.7500000 0.0000000
0 0 -1 0 1 0 -1 0 0 0.2500000 0.7500000 0.0000000
0 1 0 0 0 1 1 0 0 0.2500000 0.7500000 0.0000000
0 -1 0 0 0 -1 -1 0 0 0.2500000 0.7500000 0.0000000
0 1 0 1 0 0 0 0 -1 0.2500000 0.7500000 0.0000000
0 -1 0 -1 0 0 0 0 1 0.2500000 0.7500000 0.0000000
0 1 0 0 0 -1 -1 0 0 0.2500000 0.7500000 0.0000000
0 -1 0 0 0 1 1 0 0 0.2500000 0.7500000 0.0000000
0 1 0 -1 0 0 0 0 1 0.2500000 0.7500000 0.0000000
0 -1 0 1 0 0 0 0 -1 0.2500000 0.7500000 0.0000000
0 -1 0 0 0 -1 1 0 0 0.2500000 0.7500000 0.0000000
0 1 0 0 0 1 -1 0 0 0.2500000 0.7500000 0.0000000
0 -1 0 -1 0 0 0 0 -1 0.2500000 0.7500000 0.0000000
0 1 0 1 0 0 0 0 1 0.2500000 0.7500000 0.0000000
0 -1 0 0 0 1 -1 0 0 0.2500000 0.7500000 0.0000000
0 1 0 0 0 -1 1 0 0 0.2500000 0.7500000 0.0000000
0 -1 0 1 0 0 0 0 1 0.2500000 0.7500000 0.0000000
0 1 0 -1 0 0 0 0 -1 0.2500000 0.7500000 0.0000000
0 0 1 1 0 0 0 1 0 0.2500000 0.7500000 0.0000000
0 0 -1 -1 0 0 0 -1 0 0.2500000 0.7500000 0.0000000
1 0 0 0 0 -1 0 1 0 0.2500000 0.7500000 0.0000000
-1 0 0 0 0 1 0 -1 0 0.2500000 0.7500000 0.0000000
0 0 -1 -1 0 0 0 1 0 0.2500000 0.7500000 0.0000000
0 0 1 1 0 0 0 -1 0 0.2500000 0.7500000 0.0000000
-1 0 0 0 0 1 0 1 0 0.2500000 0.7500000 0.0000000
1 0 0 0 0 -1 0 -1 0 0.2500000 0.7500000 0.0000000
0 0 -1 1 0 0 0 -1 0 0.2500000 0.7500000 0.0000000
0 0 1 -1 0 0 0 1 0 0.2500000 0.7500000 0.0000000
-1 0 0 0 0 -1 0 -1 0 0.2500000 0.7500000 0.0000000
1 0 0 0 0 1 0 1 0 0.2500000 0.7500000 0.0000000
0 0 1 -1 0 0 0 -1 0 0.2500000 0.7500000 0.0000000
0 0 -1 1 0 0 0 1 0 0.2500000 0.7500000 0.0000000
1 0 0 0 0 1 0 -1 0 0.2500000 0.7500000 0.0000000
-1 0 0 0 0 -1 0 1 0 0.2500000 0.7500000 0.0000000
1 0 0 0 1 0 0 0 1 0.0000000 0.5000000 0.0000000
-1 0 0 0 -1 0 0 0 -1 0.0000000 0.5000000 0.0000000
0 0 -1 0 1 0 1 0 0 0.0000000 0.5000000 0.0000000
0 0 1 0 -1 0 -1 0 0 0.0000000 0.5000000 0.0000000
-1 0 0 0 1 0 0 0 -1 0.0000000 0.5000000 0.0000000
1 0 0 0 -1 0 0 0 1 0.0000000 0.5000000 0.0000000
0 0 1 0 1 0 -1 0 0 0.0000000 0.5000000 0.0000000
0 0 -1 0 -1 0 1 0 0 0.0000000 0.5000000 0.0000000
1 0 0 0 -1 0 0 0 -1 0.0000000 0.5000000 0.0000000
-1 0 0 0 1 0 0 0 1 0.0000000 0.5000000 0.0000000
0 0 -1 0 -1 0 -1 0 0 0.0000000 0.5000000 0.0000000
0 0 1 0 1 0 1 0 0 0.0000000 0.5000000 0.0000000
-1 0 0 0 -1 0 0 0 1 0.0000000 0.5000000 0.0000000
1 0 0 0 1 0 0 0 -1 0.0000000 0.5000000 0.0000000
0 0 1 0 -1 0 1 0 0 0.0000000 0.5000000 0.0000000
0 0 -1 0 1 0 -1 0 0 0.0000000 0.5000000 0.0000000
0 1 0 0 0 1 1 0 0 0.0000000 0.5000000 0.0000000
0 -1 0 0 0 -1 -1 0 0 0.0000000 0.5000000 0.0000000
0 1 0 1 0 0 0 0 -1 0.0000000 0.5000000 0.0000000
0 -1 0 -1 0 0 0 0 1 0.0000000 0.5000000 0.0000000
0 1 0 0 0 -1 -1 0 0 0.0000000 0.5000000 0.0000000
0 -1 0 0 0 1 1 0 0 0.0000000 0.5000000 0.0000000
0 1 0 -1 0 0 0 0 1 0.0000000 0.5000000 0.0000000
0 -1 0 1 0 0 0 0 -1 0.0000000 0.5000000 0.0000000
0 -1 0 0 0 -1 1 0 0 0.0000000 0.5000000 0.0000000
0 1 0 0 0 1 -1 0 0 0.0000000 0.5000000 0.0000000
0 -1 0 -1 0 0 0 0 -1 0.0000000 0.5000000 0.0000000
0 1 0 1 0 0 0 0 1 0.0000000 0.5000000 0.0000000
0 -1 0 0 0 1 -1 0 0 0.0000000 0.5000000 0.0000000
0 1 0 0 0 -1 1 0 0 0.0000000 0.5000000 0.0000000
0 -1 0 1 0 0 0 0 1 0.0000000 0.5000000 0.0000000
0 1 0 -1 0 0 0 0 -1 0.0000000 0.5000000 0.0000000
0 0 1 1 0 0 0 1 0 0.0000000 0.5000000 0.0000000
0 0 -1 -1 0 0 0 -1 0 0.0000000 0.5000000 0.0000000
1 0 0 0 0 -1 0 1 0 0.0000000 0.5000000 0.0000000
-1 0 0 0 0 1 0 -1 0 0.0000000 0.5000000 0.0000000
0 0 -1 -1 0 0 0 1 0 0.0000000 0.5000000 0.0000000
0 0 1 1 0 0 0 -1 0 0.0000000 0.5000000 0.0000000
-1 0 0 0 0 1 0 1 0 0.0000000 0.5000000 0.0000000
1 0 0 0 0 -1 0 -1 0 0.0000000 0.5000000 0.0000000
0 0 -1 1 0 0 0 -1 0 0.0000000 0.5000000 0.0000000
0 0 1 -1 0 0 0 1 0 0.0000000 0.5000000 0.0000000
-1 0 0 0 0 -1 0 -1 0 0.0000000 0.5000000 0.0000000
1 0 0 0 0 1 0 1 0 0.0000000 0.5000000 0.0000000
0 0 1 -1 0 0 0 -1 0 0.0000000 0.5000000 0.0000000
0 0 -1 1 0 0 0 1 0 0.0000000 0.5000000 0.0000000
1 0 0 0 0 1 0 -1 0 0.0000000 0.5000000 0.0000000
-1 0 0 0 0 -1 0 1 0 0.0000000 0.5000000 0.0000000
1 0 0 0 1 0 0 0 1 0.0000000 0.2500000 0.7500000
-1 0 0 0 -1 0 0 0 -1 0.0000000 0.2500000 0.7500000
0 0 -1 0 1 0 1 0 0 0.0000000 0.2500000 0.7500000
0 0 1 0 -1 0 -1 0 0 0.0000000 0.2500000 0.7500000
-1 0 0 0 1 0 0 0 -1 0.0000000 0.2500000 0.7500000
1 0 0 0 -1 0 0 0 1 0.0000000 0.2500000 0.7500000
0 0 1 0 1 0 -1 0 0 0.0000000 0.2500000 0.7500000
0 0 -1 0 -1 0 1 0 0 0.0000000 0.2500000 0.7500000
1 0 0 0 -1 0 0 0 -1 0.0000000 0.2500000 0.7500000
-1 0 0 0 1 0 0 0 1 0.0000000 0.2500000 0.7500000
0 0 -1 0 -1 0 -1 0 0 0.0000000 0.2500000 0.7500000
0 0 1 0 1 0 1 0 0 0.0000000 0.2500000 0.7500000
-1 0 0 0 -1 0 0 0 1 0.0000000 0.2500000 0.7500000
1 0 0 0 1 0 0 0 -1 0.0000000 0.2500000 0.7500000
0 0 1 0 -1 0 1 0 0 0.0000000 0.2500000 0.7500000
0 0 -1 0 1 0 -1 0 0 0.0000000 0.2500000 0.7500000
0 1 0 0 0 1 1 0 0 0.0000000 0.2500000 0.7500000
0 -1 0 0 0 -1 -1 0 0 0.0000000 0.2500000 0.7500000
0 1 0 1 0 0 0 0 -1 0.0000000 0.2500000 0.7500000
0 -1 0 -1 0 0 0 0 1 0.0000000 0.2500000 0.7500000
0 1 0 0 0 -1 -1 0 0 0.0000000 0.2500000 0.7500000
0 -1 0 0 0 1 1 0 0 0.0000000 0.2500000 0.7500000
0 1 0 -1 0 0 0 0 1 0.0000000 0.2500000 0.7500000
0 -1 0 1 0 0 0 0 -1 0.0000000 0.2500000 0.7500000
0 -1 0 0 0 -1 1 0 0 0.0000000 0.2500000 0.7500000
0 1 0 0 0 1 -1 0 0 0.0000000 0.2500000 0.7500000
0 -1 0 -1 0 0 0 0 -1 0.0000000 0.2500000 0.7500000
0 1 0 1 0 0 0 0 1 0.0000000 0.2500000 0.7500000
0 -1 0 0 0 1 -1 0 0 0.0000000 0.2500000 0.7500000
0 1 0 0 0 -1 1 0 0 0.0000000 0.2500000 0.7500000
0 -1 0 1 0 0 0 0 1 0.0000000 0.2500000 0.7500000
0 1 0 -1 0 0 0 0 -1 0.0000000 0.2500000 0.7500000
0 0 1 1 0 0 0 1 0 0.0000000 0.2500000 0.7500000
0 0 -1 -1 0 0 0 -1 0 0.0000000 0.2500000 0.7500000
1 0 0 0 0 -1 0 1 0 0.0000000 0.2500000 0.7500000
-1 0 0 0 0 1 0 -1 0 0.0000000 0.2500000 0.7500000
0 0 -1 -1 0 0 0 1 0 0.0000000 0.2500000 0.7500000
0 0 1 1 0 0 0 -1 0 0.0000000 0.2500000 0.7500000
-1 0 0 0 0 1 0 1 0 0.0000000 0.2500000 0.7500000
1 0 0 0 0 -1 0 -1 0 0.0000000 0.2500000 0.7500000
0 0 -1 1 0 0 0 -1 0 0.0000000 0.2500000 0.7500000
0 0 1 -1 0 0 0 1 0 0.0000000 0.2500000 0.7500000
-1 0 0 0 0 -1 0 -1 0 0.0000000 0.2500000 0.7500000
1 0 0 0 0 1 0 1 0 0.0000000 0.2500000 0.7500000
0 0 1 -1 0 0 0 -1 0 0.0000000 0.2500000 0.7500000
0 0 -1 1 0 0 0 1 0 0.0000000 0.2500000 0.7500000
1 0 0 0 0 1 0 -1 0 0.0000000 0.2500000 0.7500000
-1 0 0 0 0 -1 0 1 0 0.0000000 0.2500000 0.7500000
1 0 0 0 1 0 0 0 1 0.7500000 0.0000000 0.7500000
-1 0 0 0 -1 0 0 0 -1 0.7500000 0.0000000 0.7500000
0 0 -1 0 1 0 1 0 0 0.7500000 0.0000000 0.7500000
0 0 1 0 -1 0 -1 0 0 0.7500000 0.0000000 0.7500000
-1 0 0 0 1 0 0 0 -1 0.7500000 0.0000000 0.7500000
1 0 0 0 -1 0 0 0 1 0.7500000 0.0000000 0.7500000
0 0 1 0 1 0 -1 0 0 0.7500000 0.0000000 0.7500000
0 0 -1 0 -1 0 1 0 0 0.7500000 0.0000000 0.7500000
1 0 0 0 -1 0 0 0 -1 0.7500000 0.0000000 0.7500000
-1 0 0 0 1 0 0 0 1 0.7500000 0.0000000 0.7500000
0 0 -1 0 -1 0 -1 0 0 0.7500000 0.0000000 0.7500000
0 0 1 0 1 0 1 0 0 0.7500000 0.0000000 0.7500000
-1 0 0 0 -1 0 0 0 1 0.7500000 0.0000000 0.7500000
1 0 0 0 1 0 0 0 -1 0.7500000 0.0000000 0.7500000
0 0 1 0 -1 0 1 0 0 0.7500000 0.0000000 0.7500000
0 0 -1 0 1 0 -1 0 0 0.7500000 0.0000000 0.7500000
0 1 0 0 0 1 1 0 0 0.7500000 0.0000000 0.7500000
0 -1 0 0 0 -1 -1 0 0 0.7500000 0.0000000 0.7500000
0 1 0 1 0 0 0 0 -1 0.7500000 0.0000000 0.7500000
0 -1 0 -1 0 0 0 0 1 0.7500000 0.0000000 0.7500000
0 1 0 0 0 -1 -1 0 0 0.7500000 0.0000000 0.7500000
0 -1 0 0 0 1 1 0 0 0.7500000 0.0000000 0.7500000
0 1 0 -1 0 0 0 0 1 0.7500000 0.0000000 0.7500000
0 -1 0 1 0 0 0 0 -1 0.7500000 0.0000000 0.7500000
0 -1 0 0 0 -1 1 0 0 0.7500000 0.0000000 0.7500000
0 1 0 0 0 1 -1 0 0 0.7500000 0.0000000 0.7500000
0 -1 0 -1 0 0 0 0 -1 0.7500000 0.0000000 0.7500000
0 1 0 1 0 0 0 0 1 0.7500000 0.0000000 0.7500000
0 -1 0 0 0 1 -1 0 0 0.7500000 0.0000000 0.7500000
0 1 0 0 0 -1 1 0 0 0.7500000 0.0000000 0.7500000
0 -1 0 1 0 0 0 0 1 0.7500000 0.0000000 0.7500000
0 1 0 -1 0 0 0 0 -1 0.7500000 0.0000000 0.7500000
0 0 1 1 0 0 0 1 0 0.7500000 0.0000000 0.7500000
0 0 -1 -1 0 0 0 -1 0 0.7500000 0.0000000 0.7500000
1 0 0 0 0 -1 0 1 0 0.7500000 0.0000000 0.7500000
-1 0 0 0 0 1 0 -1 0 0.7500000 0.0000000 0.7500000
0 0 -1 -1 0 0 0 1 0 0.7500000 0.0000000 0.7500000
0 0 1 1 0 0 0 -1 0 0.7500000 0.0000000 0.7500000
-1 0 0 0 0 1 0 1 0 0.7500000 0.0000000 0.7500000
1 0 0 0 0 -1 0 -1 0 0.7500000 0.0000000 0.7500000
0 0 -1 1 0 0 0 -1 0 0.7500000 0.0000000 0.7500000
0 0 1 -1 0 0 0 1 0 0.7500000 0.0000000 0.7500000
-1 0 0 0 0 -1 0 -1 0 0.7500000 0.0000000 0.7500000
1 0 0 0 0 1 0 1 0 0.7500000 0.0000000 0.7500000
0 0 1 -1 0 0 0 -1 0 0.7500000 0.0000000 0.7500000
0 0 -1 1 0 0 0 1 0 0.7500000 0.0000000 0.7500000
1 0 0 0 0 1 0 -1 0 0.7500000 0.0000000 0.7500000
-1 0 0 0 0 -1 0 1 0 0.7500000 0.0000000 0.7500000
1 0 0 0 1 0 0 0 1 0.5000000 0.7500000 0.7500000
-1 0 0 0 -1 0 0 0 -1 0.5000000 0.7500000 0.7500000
0 0 -1 0 1 0 1 0 0 0.5000000 0.7500000 0.7500000
0 0 1 0 -1 0 -1 0 0 0.5000000 0.7500000 0.7500000
-1 0 0 0 1 0 0 0 -1 0.5000000 0.7500000 0.7500000
1 0 0 0 -1 0 0 0 1 0.5000000 0.7500000 0.7500000
0 0 1 0 1 0 -1 0 0 0.5000000 0.7500000 0.7500000
0 0 -1 0 -1 0 1 0 0 0.5000000 0.7500000 0.7500000
1 0 0 0 -1 0 0 0 -1 0.5000000 0.7500000 0.7500000
-1 0 0 0 1 0 0 0 1 0.5000000 0.7500000 0.7500000
0 0 -1 0 -1 0 -1 0 0 0.5000000 0.7500000 0.7500000
0 0 1 0 1 0 1 0 0 0.5000000 0.7500000 0.7500000
-1 0 0 0 -1 0 0 0 1 0.5000000 0.7500000 0.7500000
1 0 0 0 1 0 0 0 -1 0.5000000 0.7500000 0.7500000
0 0 1 0 -1 0 1 0 0 0.5000000 0.7500000 0.7500000
0 0 -1 0 1 0 -1 0 0 0.5000000 0.7500000 0.7500000
0 1 0 0 0 1 1 0 0 0.5000000 0.7500000 0.7500000
0 -1 0 0 0 -1 -1 0 0 0.5000000 0.7500000 0.7500000
0 1 0 1 0 0 0 0 -1 0.5000000 0.7500000 0.7500000
0 -1 0 -1 0 0 0 0 1 0.5000000 0.7500000 0.7500000
0 1 0 0 0 -1 -1 0 0 0.5000000 0.7500000 0.7500000
0 -1 0 0 0 1 1 0 0 0.5000000 0.7500000 0.7500000
0 1 0 -1 0 0 0 0 1 0.5000000 0.7500000 0.7500000
0 -1 0 1 0 0 0 0 -1 0.5000000 0.7500000 0.7500000
0 -1 0 0 0 -1 1 0 0 0.5000000 0.7500000 0.7500000
0 1 0 0 0 1 -1 0 0 0.5000000 0.7500000 0.7500000
0 -1 0 -1 0 0 0 0 -1 0.5000000 0.7500000 0.7500000
0 1 0 1 0 0 0 0 1 0.5000000 0.7500000 0.7500000
0 -1 0 0 0 1 -1 0 0 0.5000000 0.7500000 0.7500000
0 1 0 0 0 -1 1 0 0 0.5000000 0.7500000 0.7500000
0 -1 0 1 0 0 0 0 1 0.5000000 0.7500000 0.7500000
0 1 0 -1 0 0 0 0 -1 0.5000000 0.7500000 0.7500000
0 0 1 1 0 0 0 1 0 0.5000000 0.7500000 0.7500000
0 0 -1 -1 0 0 0 -1 0 0.5000000 0.7500000 0.7500000
1 0 0 0 0 -1 0 1 0 0.5000000 0.7500000 0.7500000
-1 0 0 0 0 1 0 -1 0 0.5000000 0.7500000 0.7500000
0 0 -1 -1 0 0 0 1 0 0.5000000 0.7500000 0.7500000
0 0 1 1 0 0 0 -1 0 0.5000000 0.7500000 0.7500000
-1 0 0 0 0 1 0 1 0 0.5000000 0.7500000 0.7500000
1 0 0 0 0 -1 0 -1 0 0.5000000 0.7500000 0.7500000
0 0 -1 1 0 0 0 -1 0 0.5000000 0.7500000 0.7500000
0 0 1 -1 0 0 0 1 0 0.5000000 0.7500000 0.7500000
-1 0 0 0 0 -1 0 -1 0 0.5000000 0.7500000 0.7500000
1 0 0 0 0 1 0 1 0 0.5000000 0.7500000 0.7500000
0 0 1 -1 0 0 0 -1 0 0.5000000 0.7500000 0.7500000
0 0 -1 1 0 0 0 1 0 0.5000000 0.7500000 0.7500000
1 0 0 0 0 1 0 -1 0 0.5000000 0.7500000 0.7500000
-1 0 0 0 0 -1 0 1 0 0.5000000 0.7500000 0.7500000
1 0 0 0 1 0 0 0 1 0.2500000 0.5000000 0.7500000
-1 0 0 0 -1 0 0 0 -1 0.2500000 0.5000000 0.7500000
0 0 -1 0 1 0 1 0 0 0.2500000 0.5000000 0.7500000
0 0 1 0 -1 0 -1 0 0 0.2500000 0.5000000 0.7500000
-1 0 0 0 1 0 0 0 -1 0.2500000 0.5000000 0.7500000
1 0 0 0 -1 0 0 0 1 0.2500000 0.5000000 0.7500000
0 0 1 0 1 0 -1 0 0 0.2500000 0.5000000 0.7500000
0 0 -1 0 -1 0 1 0 0 0.2500000 0.5000000 0.7500000
1 0 0 0 -1 0 0 0 -1 0.2500000 0.5000000 0.7500000
-1 0 0 0 1 0 0 0 1 0.2500000 0.5000000 0.7500000
0 0 -1 0 -1 0 -1 0 0 0.2500000 0.5000000 0.7500000
0 0 1 0 1 0 1 0 0 0.2500000 0.5000000 0.7500000
-1 0 0 0 -1 0 0 0 1 0.2500000 0.5000000 0.7500000
1 0 0 0 1 0 0 0 -1 0.2500000 0.5000000 0.7500000
0 0 1 0 -1 0 1 0 0 0.2500000 0.5000000 0.7500000
0 0 -1 0 1 0 -1 0 0 0.2500000 0.5000000 0.7500000
0 1 0 0 0 1 1 0 0 0.2500000 0.5000000 0.7500000
0 -1 0 0 0 -1 -1 0 0 0.2500000 0.5000000 0.7500000
0 1 0 1 0 0 0 0 -1 0.2500000 0.5000000 0.7500000
0 -1 0 -1 0 0 0 0 1 0.2500000 0.5000000 0.7500000
0 1 0 0 0 -1 -1 0 0 0.2500000 0.5000000 0.7500000
0 -1 0 0 0 1 1 0 0 0.2500000 0.5000000 0.7500000
0 1 0 -1 0 0 0 0 1 0.2500000 0.5000000 0.7500000
0 -1 0 1 0 0 0 0 -1 0.2500000 0.5000000 0.7500000
0 -1 0 0 0 -1 1 0 0 0.2500000 0.5000000 0.7500000
0 1 0 0 0 1 -1 0 0 0.2500000 0.5000000 0.7500000
0 -1 0 -1 0 0 0 0 -1 0.2500000 0.5000000 0.7500000
0 1 0 1 0 0 0 0 1 0.2500000 0.5000000 0.7500000
0 -1 0 0 0 1 -1 0 0 0.2500000 0.5000000 0.7500000
0 1 0 0 0 -1 1 0 0 0.2500000 0.5000000 0.7500000
0 -1 0 1 0 0 0 0 1 0.2500000 0.5000000 0.7500000
0 1 0 -1 0 0 0 0 -1 0.2500000 0.5000000 0.7500000
0 0 1 1 0 0 0 1 0 0.2500000 0.5000000 0.7500000
0 0 -1 -1 0 0 0 -1 0 0.2500000 0.5000000 0.7500000
1 0 0 0 0 -1 0 1 0 0.2500000 0.5000000 0.7500000
-1 0 0 0 0 1 0 -1 0 0.2500000 0.5000000 0.7500000
0 0 -1 -1 0 0 0 1 0 0.2500000 0.5000000 0.7500000
0 0 1 1 0 0 0 -1 0 0.2500000 0.5000000 0.7500000
-1 0 0 0 0 1 0 1 0 0.2500000 0.5000000 0.7500000
1 0 0 0 0 -1 0 -1 0 0.2500000 0.5000000 0.7500000
0 0 -1 1 0 0 0 -1 0 0.2500000 0.5000000 0.7500000
0 0 1 -1 0 0 0 1 0 0.2500000 0.5000000 0.7500000
-1 0 0 0 0 -1 0 -1 0 0.2500000 0.5000000 0.7500000
1 0 0 0 0 1 0 1 0 0.2500000 0.5000000 0.7500000
0 0 1 -1 0 0 0 -1 0 0.2500000 0.5000000 0.7500000
0 0 -1 1 0 0 0 1 0 0.2500000 0.5000000 0.7500000
1 0 0 0 0 1 0 -1 0 0.2500000 0.5000000 0.7500000
-1 0 0 0 0 -1 0 1 0 0.2500000 0.5000000 0.7500000
1 0 0 0 1 0 0 0 1 0.2500000 0.2500000 0.5000000
-1 0 0 0 -1 0 0 0 -1 0.2500000 0.2500000 0.5000000
0 0 -1 0 1 0 1 0 0 0.2500000 0.2500000 0.5000000
0 0 1 0 -1 0 -1 0 0 0.2500000 0.2500000 0.5000000
-1 0 0 0 1 0 0 0 -1 0.2500000 0.2500000 0.5000000
1 0 0 0 -1 0 0 0 1 0.2500000 0.2500000 0.5000000
0 0 1 0 1 0 -1 0 0 0.2500000 0.2500000 0.5000000
0 0 -1 0 -1 0 1 0 0 0.2500000 0.2500000 0.5000000
1 0 0 0 -1 0 0 0 -1 0.2500000 0.2500000 0.5000000
-1 0 0 0 1 0 0 0 1 0.2500000 0.2500000 0.5000000
0 0 -1 0 -1 0 -1 0 0 0.2500000 0.2500000 0.5000000
0 0 1 0 1 0 1 0 0 0.2500000 0.2500000 0.5000000
-1 0 0 0 -1 0 0 0 1 0.2500000 0.2500000 0.5000000
1 0 0 0 1 0 0 0 -1 0.2500000 0.2500000 0.5000000
0 0 1 0 -1 0 1 0 0 0.2500000 0.2500000 0.5000000
0 0 -1 0 1 0 -1 0 0 0.2500000 0.2500000 0.5000000
0 1 0 0 0 1 1 0 0 0.2500000 0.2500000 0.5000000
0 -1 0 0 0 -1 -1 0 0 0.2500000 0.2500000 0.5000000
0 1 0 1 0 0 0 0 -1 0.2500000 0.2500000 0.5000000
0 -1 0 -1 0 0 0 0 1 0.2500000 0.2500000 0.5000000
0 1 0 0 0 -1 -1 0 0 0.2500000 0.2500000 0.5000000
0 -1 0 0 0 1 1 0 0 0.2500000 0.2500000 0.5000000
0 1 0 -1 0 0 0 0 1 0.2500000 0.2500000 0.5000000
0 -1 0 1 0 0 0 0 -1 0.2500000 0.2500000 0.5000000
0 -1 0 0 0 -1 1 0 0 0.2500000 0.2500000 0.5000000
0 1 0 0 0 1 -1 0 0 0.2500000 0.2500000 0.5000000
0 -1 0 -1 0 0 0 0 -1 0.2500000 0.2500000 0.5000000
0 1 0 1 0 0 0 0 1 0.2500000 0.2500000 0.5000000
0 -1 0 0 0 1 -1 0 0 0.2500000 0.2500000 0.5000000
0 1 0 0 0 -1 1 0 0 0.2500000 0.2500000 0.5000000
0 -1 0 1 0 0 0 0 1 0.2500000 0.2500000 0.5000000
0 1 0 -1 0 0 0 0 -1 0.2500000 0.2500000 0.5000000
0 0 1 1 0 0 0 1 0 0.2500000 0.2500000 0.5000000
0 0 -1 -1 0 0 0 -1 0 0.2500000 0.2500000 0.5000000
1 0 0 0 0 -1 0 1 0 0.2500000 0.2500000 0.5000000
-1 0 0 0 0 1 0 -1 0 0.2500000 0.2500000 0.5000000
0 0 -1 -1 0 0 0 1 0 0.2500000 0.2500000 0.5000000
0 0 1 1 0 0 0 -1 0 0.2500000 0.2500000 0.5000000
-1 0 0 0 0 1 0 1 0 0.2500000 0.2500000 0.5000000
1 0 0 0 0 -1 0 -1 0 0.2500000 0.2500000 0.5000000
0 0 -1 1 0 0 0 -1 0 0.2500000 0.2500000 0.5000000
0 0 1 -1 0 0 0 1 0 0.2500000 0.2500000 0.5000000
-1 0 0 0 0 -1 0 -1 0 0.2500000 0.2500000 0.5000000
1 0 0 0 0 1 0 1 0 0.2500000 0.2500000 0.5000000
0 0 1 -1 0 0 0 -1 0 0.2500000 0.2500000 0.5000000
0 0 -1 1 0 0 0 1 0 0.2500000 0.2500000 0.5000000
1 0 0 0 0 1 0 -1 0 0.2500000 0.2500000 0.5000000
-1 0 0 0 0 -1 0 1 0 0.2500000 0.2500000 0.5000000
1 0 0 0 1 0 0 0 1 0.0000000 0.0000000 0.5000000
-1 0 0 0 -1 0 0 0 -1 0.0000000 0.0000000 0.5000000
0 0 -1 0 1 0 1 0 0 0.0000000 0.0000000 0.5000000
0 0 1 0 -1 0 -1 0 0 0.0000000 0.0000000 0.5000000
-1 0 0 0 1 0 0 0 -1 0.0000000 0.0000000 0.5000000
1 0 0 0 -1 0 0 0 1 0.0000000 0.0000000 0.5000000
0 0 1 0 1 0 -1 0 0 0.0000000 0.0000000 0.5000000
0 0 -1 0 -1 0 1 0 0 0.0000000 0.0000000 0.5000000
1 0 0 0 -1 0 0 0 -1 0.0000000 0.0000000 0.5000000
-1 0 0 0 1 0 0 0 1 0.0000000 0.0000000 0.5000000
0 0 -1 0 -1 0 -1 0 0 0.0000000 0.0000000 0.5000000
0 0 1 0 1 0 1 0 0 0.0000000 0.0000000 0.5000000
-1 0 0 0 -1 0 0 0 1 0.0000000 0.0000000 0.5000000
1 0 0 0 1 0 0 0 -1 0.0000000 0.0000000 0.5000000
0 0 1 0 -1 0 1 0 0 0.0000000 0.0000000 0.5000000
0 0 -1 0 1 0 -1 0 0 0.0000000 0.0000000 0.5000000
0 1 0 0 0 1 1 0 0 0.0000000 0.0000000 0.5000000
0 -1 0 0 0 -1 -1 0 0 0.0000000 0.0000000 0.5000000
0 1 0 1 0 0 0 0 -1 0.0000000 0.0000000 0.5000000
0 -1 0 -1 0 0 0 0 1 0.0000000 0.0000000 0.5000000
0 1 0 0 0 -1 -1 0 0 0.0000000 0.0000000 0.5000000
0 -1 0 0 0 1 1 0 0 0.0000000 0.0000000 0.5000000
0 1 0 -1 0 0 0 0 1 0.0000000 0.0000000 0.5000000
0 -1 0 1 0 0 0 0 -1 0.0000000 0.0000000 0.5000000
0 -1 0 0 0 -1 1 0 0 0.0000000 0.0000000 0.5000000
0 1 0 0 0 1 -1 0 0 0.0000000 0.0000000 0.5000000
0 -1 0 -1 0 0 0 0 -1 0.0000000 0.0000000 0.5000000
0 1 0 1 0 0 0 0 1 0.0000000 0.0000000 0.5000000
0 -1 0 0 0 1 -1 0 0 0.0000000 0.0000000 0.5000000
0 1 0 0 0 -1 1 0 0 0.0000000 0.0000000 0.5000000
0 -1 0 1 0 0 0 0 1 0.0000000 0.0000000 0.5000000
0 1 0 -1 0 0 0 0 -1 0.0000000 0.0000000 0.5000000
0 0 1 1 0 0 0 1 0 0.0000000 0.0000000 0.5000000
0 0 -1 -1 0 0 0 -1 0 0.0000000 0.0000000 0.5000000
1 0 0 0 0 -1 0 1 0 0.0000000 0.0000000 0.5000000
-1 0 0 0 0 1 0 -1 0 0.0000000 0.0000000 0.5000000
0 0 -1 -1 0 0 0 1 0 0.0000000 0.0000000 0.5000000
0 0 1 1 0 0 0 -1 0 0.0000000 0.0000000 0.5000000
-1 0 0 0 0 1 0 1 0 0.0000000 0.0000000 0.5000000
1 0 0 0 0 -1 0 -1 0 0.0000000 0.0000000 0.5000000
0 0 -1 1 0 0 0 -1 0 0.0000000 0.0000000 0.5000000
0 0 1 -1 0 0 0 1 0 0.0000000 0.0000000 0.5000000
-1 0 0 0 0 -1 0 -1 0 0.0000000 0.0000000 0.5000000
1 0 0 0 0 1 0 1 0 0.0000000 0.0000000 0.5000000
0 0 1 -1 0 0 0 -1 0 0.0000000 0.0000000 0.5000000
0 0 -1 1 0 0 0 1 0 0.0000000 0.0000000 0.5000000
1 0 0 0 0 1 0 -1 0 0.0000000 0.0000000 0.5000000
-1 0 0 0 0 -1 0 1 0 0.0000000 0.0000000 0.5000000
1 0 0 0 1 0 0 0 1 0.7500000 0.7500000 0.5000000
-1 0 0 0 -1 0 0 0 -1 0.7500000 0.7500000 0.5000000
0 0 -1 0 1 0 1 0 0 0.7500000 0.7500000 0.5000000
0 0 1 0 -1 0 -1 0 0 0.7500000 0.7500000 0.5000000
-1 0 0 0 1 0 0 0 -1 0.7500000 0.7500000 0.5000000
1 0 0 0 -1 0 0 0 1 0.7500000 0.7500000 0.5000000
0 0 1 0 1 0 -1 0 0 0.7500000 0.7500000 0.5000000
0 0 -1 0 -1 0 1 0 0 0.7500000 0.7500000 0.5000000
1 0 0 0 -1 0 0 0 -1 0.7500000 0.7500000 0.5000000
-1 0 0 0 1 0 0 0 1 0.7500000 0.7500000 0.5000000
0 0 -1 0 -1 0 -1 0 0 0.7500000 0.7500000 0.5000000
0 0 1 0 1 0 1 0 0 0.7500000 0.7500000 0.5000000
-1 0 0 0 -1 0 0 0 1 0.7500000 0.7500000 0.5000000
1 0 0 0 1 0 0 0 -1 0.7500000 0.7500000 0.5000000
0 0 1 0 -1 0 1 0 0 0.7500000 0.7500000 0.5000000
0 0 -1 0 1 0 -1 0 0 0.7500000 0.7500000 0.5000000
0 1 0 0 0 1 1 0 0 0.7500000 0.7500000 0.5000000
0 -1 0 0 0 -1 -1 0 0 0.7500000 0.7500000 0.5000000
0 1 0 1 0 0 0 0 -1 0.7500000 0.7500000 0.5000000
0 -1 0 -1 0 0 0 0 1 0.7500000 0.7500000 0.5000000
0 1 0 0 0 -1 -1 0 0 0.7500000 0.7500000 0.5000000
0 -1 0 0 0 1 1 0 0 0.7500000 0.7500000 0.5000000
0 1 0 -1 0 0 0 0 1 0.7500000 0.7500000 0.5000000
0 -1 0 1 0 0 0 0 -1 0.7500000 0.7500000 0.5000000
0 -1 0 0 0 -1 1 0 0 0.7500000 0.7500000 0.5000000
0 1 0 0 0 1 -1 0 0 0.7500000 0.7500000 0.5000000
0 -1 0 -1 0 0 0 0 -1 0.7500000 0.7500000 0.5000000
0 1 0 1 0 0 0 0 1 0.7500000 0.7500000 0.5000000
0 -1 0 0 0 1 -1 0 0 0.7500000 0.7500000 0.5000000
0 1 0 0 0 -1 1 0 0 0.7500000 0.7500000 0.5000000
0 -1 0 1 0 0 0 0 1 0.7500000 0.7500000 0.5000000
0 1 0 -1 0 0 0 0 -1 0.7500000 0.7500000 0.5000000
0 0 1 1 0 0 0 1 0 0.7500000 0.7500000 0.5000000
0 0 -1 -1 0 0 0 -1 0 0.7500000 0.7500000 0.5000000
1 0 0 0 0 -1 0 1 0 0.7500000 0.7500000 0.5000000
-1 0 0 0 0 1 0 -1 0 0.7500000 0.7500000 0.5000000
0 0 -1 -1 0 0 0 1 0 0.7500000 0.7500000 0.5000000
0 0 1 1 0 0 0 -1 0 0.7500000 0.7500000 0.5000000
-1 0 0 0 0 1 0 1 0 0.7500000 0.7500000 0.5000000
1 0 0 0 0 -1 0 -1 0 0.7500000 0.7500000 0.5000000
0 0 -1 1 0 0 0 -1 0 0.7500000 0.7500000 0.5000000
0 0 1 -1 0 0 0 1 0 0.7500000 0.7500000 0.5000000
-1 0 0 0 0 -1 0 -1 0 0.7500000 0.7500000 0.5000000
1 0 0 0 0 1 0 1 0 0.7500000 0.7500000 0.5000000
0 0 1 -1 0 0 0 -1 0 0.7500000 0.7500000 0.5000000
0 0 -1 1 0 0 0 1 0 0.7500000 0.7500000 0.5000000
1 0 0 0 0 1 0 -1 0 0.7500000 0.7500000 0.5000000
-1 0 0 0 0 -1 0 1 0 0.7500000 0.7500000 0.5000000
1 0 0 0 1 0 0 0 1 0.5000000 0.5000000 0.5000000
-1 0 0 0 -1 0 0 0 -1 0.5000000 0.5000000 0.5000000
0 0 -1 0 1 0 1 0 0 0.5000000 0.5000000 0.5000000
0 0 1 0 -1 0 -1 0 0 0.5000000 0.5000000 0.5000000
-1 0 0 0 1 0 0 0 -1 0.5000000 0.5000000 0.5000000
1 0 0 0 -1 0 0 0 1 0.5000000 0.5000000 0.5000000
0 0 1 0 1 0 -1 0 0 0.5000000 0.5000000 0.5000000
0 0 -1 0 -1 0 1 0 0 0.5000000 0.5000000 0.5000000
1 0 0 0 -1 0 0 0 -1 0.5000000 0.5000000 0.5000000
-1 0 0 0 1 0 0 0 1 0.5000000 0.5000000 0.5000000
0 0 -1 0 -1 0 -1 0 0 0.5000000 0.5000000 0.5000000
0 0 1 0 1 0 1 0 0 0.5000000 0.5000000 0.5000000
-1 0 0 0 -1 0 0 0 1 0.5000000 0.5000000 0.5000000
1 0 0 0 1 0 0 0 -1 0.5000000 0.5000000 0.5000000
0 0 1 0 -1 0 1 0 0 0.5000000 0.5000000 0.5000000
0 0 -1 0 1 0 -1 0 0 0.5000000 0.5000000 0.5000000
0 1 0 0 0 1 1 0 0 0.5000000 0.5000000 0.5000000
0 -1 0 0 0 -1 -1 0 0 0.5000000 0.5000000 0.5000000
0 1 0 1 0 0 0 0 -1 0.5000000 0.5000000 0.5000000
0 -1 0 -1 0 0 0 0 1 0.5000000 0.5000000 0.5000000
0 1 0 0 0 -1 -1 0 0 0.5000000 0.5000000 0.5000000
0 -1 0 0 0 1 1 0 0 0.5000000 0.5000000 0.5000000
0 1 0 -1 0 0 0 0 1 0.5000000 0.5000000 0.5000000
0 -1 0 1 0 0 0 0 -1 0.5000000 0.5000000 0.5000000
0 -1 0 0 0 -1 1 0 0 0.5000000 0.5000000 0.5000000
0 1 0 0 0 1 -1 0 0 0.5000000 0.5000000 0.5000000
0 -1 0 -1 0 0 0 0 -1 0.5000000 0.5000000 0.5000000
0 1 0 1 0 0 0 0 1 0.5000000 0.5000000 0.5000000
0 -1 0 0 0 1 -1 0 0 0.5000000 0.5000000 0.5000000
0 1 0 0 0 -1 1 0 0 0.5000000 0.5000000 0.5000000
0 -1 0 1 0 0 0 0 1 0.5000000 0.5000000 0.5000000
0 1 0 -1 0 0 0 0 -1 0.5000000 0.5000000 0.5000000
0 0 1 1 0 0 0 1 0 0.5000000 0.5000000 0.5000000
0 0 -1 -1 0 0 0 -1 0 0.5000000 0.5000000 0.5000000
1 0 0 0 0 -1 0 1 0 0.5000000 0.5000000 0.5000000
-1 0 0 0 0 1 0 -1 0 0.5000000 0.5000000 0.5000000
0 0 -1 -1 0 0 0 1 0 0.5000000 0.5000000 0.5000000
0 0 1 1 0 0 0 -1 0 0.5000000 0.5000000 0.5000000
-1 0 0 0 0 1 0 1 0 0.5000000 0.5000000 0.5000000
1 0 0 0 0 -1 0 -1 0 0.5000000 0.5000000 0.5000000
0 0 -1 1 0 0 0 -1 0 0.5000000 0.5000000 0.5000000
0 0 1 -1 0 0 0 1 0 0.5000000 0.5000000 0.5000000
-1 0 0 0 0 -1 0 -1 0 0.5000000 0.5000000 0.5000000
1 0 0 0 0 1 0 1 0 0.5000000 0.5000000 0.5000000
0 0 1 -1 0 0 0 -1 0 0.5000000 0.5000000 0.5000000
0 0 -1 1 0 0 0 1 0 0.5000000 0.5000000 0.5000000
1 0 0 0 0 1 0 -1 0 0.5000000 0.5000000 0.5000000
-1 0 0 0 0 -1 0 1 0 0.5000000 0.5000000 0.5000000
1 0 0 0 1 0 0 0 1 0.5000000 0.2500000 0.2500000
-1 0 0 0 -1 0 0 0 -1 0.5000000 0.2500000 0.2500000
0 0 -1 0 1 0 1 0 0 0.5000000 0.2500000 0.2500000
0 0 1 0 -1 0 -1 0 0 0.5000000 0.2500000 0.2500000
-1 0 0 0 1 0 0 0 -1 0.5000000 0.2500000 0.2500000
1 0 0 0 -1 0 0 0 1 0.5000000 0.2500000 0.2500000
0 0 1 0 1 0 -1 0 0 0.5000000 0.2500000 0.2500000
0 0 -1 0 -1 0 1 0 0 0.5000000 0.2500000 0.2500000
1 0 0 0 -1 0 0 0 -1 0.5000000 0.2500000 0.2500000
-1 0 0 0 1 0 0 0 1 0.5000000 0.2500000 0.2500000
0 0 -1 0 -1 0 -1 0 0 0.5000000 0.2500000 0.2500000
0 0 1 0 1 0 1 0 0 0.5000000 0.2500000 0.2500000
-1 0 0 0 -1 0 0 0 1 0.5000000 0.2500000 0.2500000
1 0 0 0 1 0 0 0 -1 0.5000000 0.2500000 0.2500000
0 0 1 0 -1 0 1 0 0 0.5000000 0.2500000 0.2500000
0 0 -1 0 1 0 -1 0 0 0.5000000 0.2500000 0.2500000
0 1 0 0 0 1 1 0 0 0.5000000 0.2500000 0.2500000
0 -1 0 0 0 -1 -1 0 0 0.5000000 0.2500000 0.2500000
0 1 0 1 0 0 0 0 -1 0.5000000 0.2500000 0.2500000
0 -1 0 -1 0 0 0 0 1 0.5000000 0.2500000 0.2500000
0 1 0 0 0 -1 -1 0 0 0.5000000 0.2500000 0.2500000
0 -1 0 0 0 1 1 0 0 0.5000000 0.2500000 0.2500000
0 1 0 -1 0 0 0 0 1 0.5000000 0.2500000 0.2500000
0 -1 0 1 0 0 0 0 -1 0.5000000 0.2500000 0.2500000
0 -1 0 0 0 -1 1 0 0 0.5000000 0.2500000 0.2500000
0 1 0 0 0 1 -1 0 0 0.5000000 0.2500000 0.2500000
0 -1 0 -1 0 0 0 0 -1 0.5000000 0.2500000 0.2500000
0 1 0 1 0 0 0 0 1 0.5000000 0.2500000 0.2500000
0 -1 0 0 0 1 -1 0 0 0.5000000 0.2500000 0.2500000
0 1 0 0 0 -1 1 0 0 0.5000000 0.2500000 0.2500000
0 -1 0 1 0 0 0 0 1 0.5000000 0.2500000 0.2500000
0 1 0 -1 0 0 0 0 -1 0.5000000 0.2500000 0.2500000
0 0 1 1 0 0 0 1 0 0.5000000 0.2500000 0.2500000
0 0 -1 -1 0 0 0 -1 0 0.5000000 0.2500000 0.2500000
1 0 0 0 0 -1 0 1 0 0.5000000 0.2500000 0.2500000
-1 0 0 0 0 1 0 -1 0 0.5000000 0.2500000 0.2500000
0 0 -1 -1 0 0 0 1 0 0.5000000 0.2500000 0.2500000
0 0 1 1 0 0 0 -1 0 0.5000000 0.2500000 0.2500000
-1 0 0 0 0 1 0 1 0 0.5000000 0.2500000 0.2500000
1 0 0 0 0 -1 0 -1 0 0.5000000 0.2500000 0.2500000
0 0 -1 1 0 0 0 -1 0 0.5000000 0.2500000 0.2500000
0 0 1 -1 0 0 0 1 0 0.5000000 0.2500000 0.2500000
-1 0 0 0 0 -1 0 -1 0 0.5000000 0.2500000 0.2500000
1 0 0 0 0 1 0 1 0 0.5000000 0.2500000 0.2500000
0 0 1 -1 0 0 0 -1 0 0.5000000 0.2500000 0.2500000
0 0 -1 1 0 0 0 1 0 0.5000000 0.2500000 0.2500000
1 0 0 0 0 1 0 -1 0 0.5000000 0.2500000 0.2500000
-1 0 0 0 0 -1 0 1 0 0.5000000 0.2500000 0.2500000
1 0 0 0 1 0 0 0 1 0.2500000 0.0000000 0.2500000
-1 0 0 0 -1 0 0 0 -1 0.2500000 0.0000000 0.2500000
0 0 -1 0 1 0 1 0 0 0.2500000 0.0000000 0.2500000
0 0 1 0 -1 0 -1 0 0 0.2500000 0.0000000 0.2500000
-1 0 0 0 1 0 0 0 -1 0.2500000 0.0000000 0.2500000
1 0 0 0 -1 0 0 0 1 0.2500000 0.0000000 0.2500000
0 0 1 0 1 0 -1 0 0 0.2500000 0.0000000 0.2500000
0 0 -1 0 -1 0 1 0 0 0.2500000 0.0000000 0.2500000
1 0 0 0 -1 0 0 0 -1 0.2500000 0.0000000 0.2500000
-1 0 0 0 1 0 0 0 1 0.2500000 0.0000000 0.2500000
0 0 -1 0 -1 0 -1 0 0 0.2500000 0.0000000 0.2500000
0 0 1 0 1 0 1 0 0 0.2500000 0.0000000 0.2500000
-1 0 0 0 -1 0 0 0 1 0.2500000 0.0000000 0.2500000
1 0 0 0 1 0 0 0 -1 0.2500000 0.0000000 0.2500000
0 0 1 0 -1 0 1 0 0 0.2500000 0.0000000 0.2500000
0 0 -1 0 1 0 -1 0 0 0.2500000 0.0000000 0.2500000
0 1 0 0 0 1 1 0 0 0.2500000 0.0000000 0.2500000
0 -1 0 0 0 -1 -1 0 0 0.2500000 0.0000000 0.2500000
0 1 0 1 0 0 0 0 -1 0.2500000 0.0000000 0.2500000
0 -1 0 -1 0 0 0 0 1 0.2500000 0.0000000 0.2500000
0 1 0 0 0 -1 -1 0 0 0.2500000 0.0000000 0.2500000
0 -1 0 0 0 1 1 0 0 0.2500000 0.0000000 0.2500000
0 1 0 -1 0 0 0 0 1 0.2500000 0.0000000 0.2500000
0 -1 0 1 0 0 0 0 -1 0.2500000 0.0000000 0.2500000
0 -1 0 0 0 -1 1 0 0 0.2500000 0.0000000 0.2500000
0 1 0 0 0 1 -1 0 0 0.2500000 0.0000000 0.2500000
0 -1 0 -1 0 0 0 0 -1 0.2500000 0.0000000 0.2500000
0 1 0 1 0 0 0 0 1 0.2500000 0.0000000 0.2500000
0 -1 0 0 0 1 -1 0 0 0.2500000 0.0000000 0.2500000
0 1 0 0 0 -1 1 0 0 0.2500000 0.0000000 0.2500000
0 -1 0 1 0 0 0 0 1 0.2500000 0.0000000 0.2500000
0 1 0 -1 0 0 0 0 -1 0.2500000 0.0000000 0.2500000
0 0 1 1 0 0 0 1 0 0.2500000 0.0000000 0.2500000
0 0 -1 -1 0 0 0 -1 0 0.2500000 0.0000000 0.2500000
1 0 0 0 0 -1 0 1 0 0.2500000 0.0000000 0.2500000
-1 0 0 0 0 1 0 -1 0 0.2500000 0.0000000 0.2500000
0 0 -1 -1 0 0 0 1 0 0.2500000 0.0000000 0.2500000
0 0 1 1 0 0 0 -1 0 0.2500000 0.0000000 0.2500000
-1 0 0 0 0 1 0 1 0 0.2500000 0.0000000 0.2500000
1 0 0 0 0 -1 0 -1 0 0.2500000 0.0000000 0.2500000
0 0 -1 1 0 0 0 -1 0 0.2500000 0.0000000 0.2500000
0 0 1 -1 0 0 0 1 0 0.2500000 0.0000000 0.2500000
-1 0 0 0 0 -1 0 -1 0 0.2500000 0.0000000 0.2500000
1 0 0 0 0 1 0 1 0 0.2500000 0.0000000 0.2500000
0 0 1 -1 0 0 0 -1 0 0.2500000 0.0000000 0.2500000
0 0 -1 1 0 0 0 1 0 0.2500000 0.0000000 0.2500000
1 0 0 0 0 1 0 -1 0 0.2500000 0.0000000 0.2500000
-1 0 0 0 0 -1 0 1 0 0.2500000 0.0000000 0.2500000
1 0 0 0 1 0 0 0 1 0.0000000 0.7500000 0.2500000
-1 0 0 0 -1 0 0 0 -1 0.0000000 0.7500000 0.2500000
0 0 -1 0 1 0 1 0 0 0.0000000 0.7500000 0.2500000
0 0 1 0 -1 0 -1 0 0 0.0000000 0.7500000 0.2500000
-1 0 0 0 1 0 0 0 -1 0.0000000 0.7500000 0.2500000
1 0 0 0 -1 0 0 0 1 0.0000000 0.7500000 0.2500000
0 0 1 0 1 0 -1 0 0 0.0000000 0.7500000 0.2500000
0 0 -1 0 -1 0 1 0 0 0.0000000 0.7500000 0.2500000
1 0 0 0 -1 0 0 0 -1 0.0000000 0.7500000 0.2500000
-1 0 0 0 1 0 0 0 1 0.0000000 0.7500000 0.2500000
0 0 -1 0 -1 0 -1 0 0 0.0000000 0.7500000 0.2500000
0 0 1 0 1 0 1 0 0 0.0000000 0.7500000 0.2500000
-1 0 0 0 -1 0 0 0 1 0.0000000 0.7500000 0.2500000
1 0 0 0 1 0 0 0 -1 0.0000000 0.7500000 0.2500000
0 0 1 0 -1 0 1 0 0 0.0000000 0.7500000 0.2500000
0 0 -1 0 1 0 -1 0 0 0.0000000 0.7500000 0.2500000
0 1 0 0 0 1 1 0 0 0.0000000 0.7500000 0.2500000
0 -1 0 0 0 -1 -1 0 0 0.0000000 0.7500000 0.2500000
0 1 0 1 0 0 0 0 -1 0.0000000 0.7500000 0.2500000
0 -1 0 -1 0 0 0 0 1 0.0000000 0.7500000 0.2500000
0 1 0 0 0 -1 -1 0 0 0.0000000 0.7500000 0.2500000
0 -1 0 0 0 1 1 0 0 0.0000000 0.7500000 0.2500000
0 1 0 -1 0 0 0 0 1 0.0000000 0.7500000 0.2500000
0 -1 0 1 0 0 0 0 -1 0.0000000 0.7500000 0.2500000
0 -1 0 0 0 -1 1 0 0 0.0000000 0.7500000 0.2500000
0 1 0 0 0 1 -1 0 0 0.0000000 0.7500000 0.2500000
0 -1 0 -1 0 0 0 0 -1 0.0000000 0.7500000 0.2500000
0 1 0 1 0 0 0 0 1 0.0000000 0.7500000 0.2500000
0 -1 0 0 0 1 -1 0 0 0.0000000 0.7500000 0.2500000
0 1 0 0 0 -1 1 0 0 0.0000000 0.7500000 0.2500000
0 -1 0 1 0 0 0 0 1 0.0000000 0.7500000 0.2500000
0 1 0 -1 0 0 0 0 -1 0.0000000 0.7500000 0.2500000
0 0 1 1 0 0 0 1 0 0.0000000 0.7500000 0.2500000
0 0 -1 -1 0 0 0 -1 0 0.0000000 0.7500000 0.2500000
1 0 0 0 0 -1 0 1 0 0.0000000 0.7500000 0.2500000
-1 0 0 0 0 1 0 -1 0 0.0000000 0.7500000 0.2500000
0 0 -1 -1 0 0 0 1 0 0.0000000 0.7500000 0.2500000
0 0 1 1 0 0 0 -1 0 0.0000000 0.7500000 0.2500000
-1 0 0 0 0 1 0 1 0 0.0000000 0.7500000 0.2500000
1 0 0 0 0 -1 0 -1 0 0.0000000 0.7500000 0.2500000
0 0 -1 1 0 0 0 -1 0 0.0000000 0.7500000 0.2500000
0 0 1 -1 0 0 0 1 0 0.0000000 0.7500000 0.2500000
-1 0 0 0 0 -1 0 -1 0 0.0000000 0.7500000 0.2500000
1 0 0 0 0 1 0 1 0 0.0000000 0.7500000 0.2500000
0 0 1 -1 0 0 0 -1 0 0.0000000 0.7500000 0.2500000
0 0 -1 1 0 0 0 1 0 0.0000000 0.7500000 0.2500000
1 0 0 0 0 1 0 -1 0 0.0000000 0.7500000 0.2500000
-1 0 0 0 0 -1 0 1 0 0.0000000 0.7500000 0.2500000
1 0 0 0 1 0 0 0 1 0.7500000 0.5000000 0.2500000
-1 0 0 0 -1 0 0 0 -1 0.7500000 0.5000000 0.2500000
0 0 -1 0 1 0 1 0 0 0.7500000 0.5000000 0.2500000
0 0 1 0 -1 0 -1 0 0 0.7500000 0.5000000 0.2500000
-1 0 0 0 1 0 0 0 -1 0.7500000 0.5000000 0.2500000
1 0 0 0 -1 0 0 0 1 0.7500000 0.5000000 0.2500000
0 0 1 0 1 0 -1 0 0 0.7500000 0.5000000 0.2500000
0 0 -1 0 -1 0 1 0 0 0.7500000 0.5000000 0.2500000
1 0 0 0 -1 0 0 0 -1 0.7500000 0.5000000 0.2500000
-1 0 0 0 1 0 0 0 1 0.7500000 0.5000000 0.2500000
0 0 -1 0 -1 0 -1 0 0 0.7500000 0.5000000 0.2500000
0 0 1 0 1 0 1 0 0 0.7500000 0.5000000 0.2500000
-1 0 0 0 -1 0 0 0 1 0.7500000 0.5000000 0.2500000
1 0 0 0 1 0 0 0 -1 0.7500000 0.5000000 0.2500000
0 0 1 0 -1 0 1 0 0 0.7500000 0.5000000 0.2500000
0 0 -1 0 1 0 -1 0 0 0.7500000 0.5000000 0.2500000
0 1 0 0 0 1 1 0 0 0.7500000 0.5000000 0.2500000
0 -1 0 0 0 -1 -1 0 0 0.7500000 0.5000000 0.2500000
0 1 0 1 0 0 0 0 -1 0.7500000 0.5000000 0.2500000
0 -1 0 -1 0 0 0 0 1 0.7500000 0.5000000 0.2500000
0 1 0 0 0 -1 -1 0 0 0.7500000 0.5000000 0.2500000
0 -1 0 0 0 1 1 0 0 0.7500000 0.5000000 0.2500000
0 1 0 -1 0 0 0 0 1 0.7500000 0.5000000 0.2500000
0 -1 0 1 0 0 0 0 -1 0.7500000 0.5000000 0.2500000
0 -1 0 0 0 -1 1 0 0 0.7500000 0.5000000 0.2500000
0 1 0 0 0 1 -1 0 0 0.7500000 0.5000000 0.2500000
0 -1 0 -1 0 0 0 0 -1 0.7500000 0.5000000 0.2500000
0 1 0 1 0 0 0 0 1 0.7500000 0.5000000 0.2500000
0 -1 0 0 0 1 -1 0 0 0.7500000 0.5000000 0.2500000
0 1 0 0 0 -1 1 0 0 0.7500000 0.5000000 0.2500000
0 -1 0 1 0 0 0 0 1 0.7500000 0.5000000 0.2500000
0 1 0 -1 0 0 0 0 -1 0.7500000 0.5000000 0.2500000
0 0 1 1 0 0 0 1 0 0.7500000 0.5000000 0.2500000
0 0 -1 -1 0 0 0 -1 0 0.7500000 0.5000000 0.2500000
1 0 0 0 0 -1 0 1 0 0.7500000 0.5000000 0.2500000
-1 0 0 0 0 1 0 -1 0 0.7500000 0.5000000 0.2500000
0 0 -1 -1 0 0 0 1 0 0.7500000 0.5000000 0.2500000
0 0 1 1 0 0 0 -1 0 0.7500000 0.5000000 0.2500000
-1 0 0 0 0 1 0 1 0 0.7500000 0.5000000 0.2500000
1 0 0 0 0 -1 0 -1 0 0.7500000 0.5000000 0.2500000
0 0 -1 1 0 0 0 -1 0 0.7500000 0.5000000 0.2500000
0 0 1 -1 0 0 0 1 0 0.7500000 0.5000000 0.2500000
-1 0 0 0 0 -1 0 -1 0 0.7500000 0.5000000 0.2500000
1 0 0 0 0 1 0 1 0 0.7500000 0.5000000 0.2500000
0 0 1 -1 0 0 0 -1 0 0.7500000 0.5000000 0.2500000
0 0 -1 1 0 0 0 1 0 0.7500000 0.5000000 0.2500000
1 0 0 0 0 1 0 -1 0 0.7500000 0.5000000 0.2500000
-1 0 0 0 0 -1 0 1 0 0.7500000 0.5000000 0.2500000"""
class TestPureTrans(unittest.TestCase):
"""This is a test for new implentation of search_pure_translations in
symmetry.c (ee97ad17) againt a previous version. The order of
symmetry oprations found by this new implementation may be
different from that obtaiend by the older version but the set must
be the same in rotations and very close in translations.
"""
def setUp(self):
cell = read_vasp_from_strings(Al222)
self._sym_ops = get_symmetry(cell)
rot = []
trans = []
        for line in sym_ops_str.split('\n'):
arr = line.split()
rot += [int(x) for x in arr[:9]]
trans += [float(x) for x in arr[9:]]
self._rot_ref = np.reshape(rot, (-1, 3, 3))
self._trans_ref = np.reshape(trans, (-1, 3))
def tearDown(self):
pass
def test_pure_trans(self):
nums = []
        for r, t in zip(self._sym_ops['rotations'],
                        self._sym_ops['translations']):
for j, (rr, tr) in enumerate(zip(self._rot_ref, self._trans_ref)):
if (r == rr).all() and (np.abs(t - tr) < 1e-5).all():
nums.append(j)
break
np.testing.assert_array_equal(np.sort(nums),
np.arange(len(self._rot_ref)))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestPureTrans)
unittest.TextTestRunner(verbosity=2).run(suite)
# unittest.main()
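# Illustrative helper (a standalone restatement of the matching idea in
# test_pure_trans above; not used by the test itself): two operation sets agree
# when every found (rotation, translation) pair matches a distinct reference
# pair, with translations compared within a small tolerance.
def same_operation_sets(rots, transs, rots_ref, transs_ref, tol=1e-5):
    nums = []
    for r, t in zip(rots, transs):
        for j, (rr, tr) in enumerate(zip(rots_ref, transs_ref)):
            if j not in nums and (r == rr).all() and (np.abs(t - tr) < tol).all():
                nums.append(j)
                break
    return sorted(nums) == list(range(len(rots_ref)))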
|
jochym/spglib
|
python/test/test_pure_trans.py
|
Python
|
bsd-3-clause
| 105,250
|
[
"VASP"
] |
cc00c9ed817f7a1ac6b05895df9df03bee3964d6e298bfa10c28eb3ea17561be
|
#!/usr/bin/env python
# Register files to LFN.
# If a file is not under /cefs, copy it to /cefs/dirac/user/{first-char}/{user-name}/jsub/{mother-dir}/
# (the full path is not appended because of the max depth limit of LFN).
# LFN = /{vo}/user/{first-char}/{user-name}/ + full_path
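# Illustrative examples (hypothetical user "alice"; see the lfn construction in main() below):
#   /home/alice/run1/out.root -> /cepc/user/a/alice/jsub/run1/out.root    (put and register)
#   /cefs/data/run1/out.root  -> /cepc/lustre-ro/cefs/data/run1/out.root  (register only)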
import os
from DIRAC import S_OK, S_ERROR, gLogger, exit
from DIRAC.Core.Base import Script
Script.setUsageMessage('Register files to DFC')
Script.parseCommandLine(ignoreErrors=False)
from DIRAC.Core.Utilities.Adler import fileAdler
from DIRAC.Core.Utilities.File import makeGuid
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
from DIRAC.Resources.Catalog.FileCatalogClient import FileCatalogClient
fcc = FileCatalogClient('DataManagement/FileCatalog')
files = Script.getPositionalArgs()
_bufferSize = 100
_se = 'IHEP-STORM'
overwrite=False
def main():
dm = DataManager()
fileTupleBuffer = []
res = getProxyInfo( False, False )
if not res['OK']:
gLogger.error( "Failed to get client proxy information.", res['Message'] )
        exit( 2 )
proxyInfo = res['Value']
if proxyInfo['secondsLeft'] == 0:
gLogger.error( "Proxy expired" )
        exit( 2 )
username = proxyInfo['username']
vo = ''
if 'group' in proxyInfo:
vo = getVOForGroup( proxyInfo['group'] )
counter = 0
for f in files:
counter += 1
local_f=f
if not f.startswith('/cefs'):
# gLogger.error('File must be under "/cefs"')
# continue
#if the file to reg is not under /cefs, use put and register
            folder_name = os.path.basename(os.path.dirname(f))
            lfn = '/cepc/user/%s/%s/jsub/' % (username[0], username) + folder_name + '/' + os.path.basename(f)
            # dirname = os.path.dirname(local_f)
            # os.system('mkdir -p %s'%(dirname))
            # os.system('cp %s %s' %(f,local_f))
            do_put_and_register = True
else:
lfn = '/cepc/lustre-ro' + os.path.abspath(f)
            do_put_and_register = False
result = fcc.isFile(lfn)
if result['OK'] and lfn in result['Value']['Successful'] and result['Value']['Successful'][lfn]:
continue
size = os.path.getsize(f)
adler32 = fileAdler(f)
guid = makeGuid()
fileTuple = (lfn, local_f, size, _se, guid, adler32)
fileTupleBuffer.append(fileTuple)
gLogger.debug('Register to lfn: %s' % lfn)
gLogger.debug('fileTuple: %s' % (fileTuple,))
        if len(fileTupleBuffer) >= _bufferSize:
            if do_put_and_register:
                # Upload and register every buffered file, not only the most recent tuple
                for lfn_, local_, size_, se_, guid_, adler_ in fileTupleBuffer:
                    result = dm.putAndRegister(lfn_, local_, se_, guid_, overwrite=overwrite)
                    print('register result', result)
            else:
                result = dm.registerFile(fileTupleBuffer)
                print('register result', result)
            # if not result['OK']:
            #     gLogger.error('Register file failed')
            #     return 1
            del fileTupleBuffer[:]
            gLogger.debug('%s files registered' % counter)
    if fileTupleBuffer:
        if do_put_and_register:
            # Upload and register the remaining buffered files
            for lfn_, local_, size_, se_, guid_, adler_ in fileTupleBuffer:
                result = dm.putAndRegister(lfn_, local_, se_, guid_, overwrite=overwrite)
                print('register result', result)
        else:
            result = dm.registerFile(fileTupleBuffer)
            print('register result', result)
        # if not result['OK']:
        #     gLogger.error('Register file failed')
        #     return 1
        del fileTupleBuffer[:]
gLogger.info('Totally %s files registered' % counter)
return 0
if __name__ == '__main__':
exit(main())
|
jsubpy/jsub
|
jsub/scripts/dirac-register.py
|
Python
|
mit
| 3,758
|
[
"DIRAC"
] |
92a9565e2d3eec45e1e1f837b12959073068089f1ca2c1e44e9ac454548cdb99
|
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Activation
from keras.layers import Masking
from keras.optimizers import RMSprop
from keras import backend as k
from sklearn.preprocessing import normalize
"""
Discrete log-likelihood for Weibull hazard function on censored survival data
y_true is a (samples, 2) tensor containing time-to-event (y), and an event indicator (u)
ab_pred is a (samples, 2) tensor containing predicted Weibull alpha (a) and beta (b) parameters
For math, see https://ragulpr.github.io/assets/draft_master_thesis_martinsson_egil_wtte_rnn_2016.pdf (Page 35)
"""
def weibull_loglik_discrete(y_true, ab_pred, name=None):
y_ = y_true[:, 0]
u_ = y_true[:, 1]
a_ = ab_pred[:, 0]
b_ = ab_pred[:, 1]
hazard0 = k.pow((y_ + 1e-35) / a_, b_)
hazard1 = k.pow((y_ + 1) / a_, b_)
return -1 * k.mean(u_ * k.log(k.exp(hazard1 - hazard0) - 1.0) - hazard1)
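# Quick standalone check (illustrative values, not used in training): the same
# discrete log-likelihood for one uncensored sample (u=1, y=3) with assumed
# Weibull parameters a=5, b=1.2, evaluated in plain numpy.
_y, _u, _a, _b = 3.0, 1.0, 5.0, 1.2
_hazard0 = ((_y + 1e-35) / _a) ** _b
_hazard1 = ((_y + 1.0) / _a) ** _b
_loglik = _u * np.log(np.exp(_hazard1 - _hazard0) - 1.0) - _hazard1
# The Keras loss above returns the negative batch mean of this quantity.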
"""
Not used for this model, but included in case somebody needs it
For math, see https://ragulpr.github.io/assets/draft_master_thesis_martinsson_egil_wtte_rnn_2016.pdf (Page 35)
"""
def weibull_loglik_continuous(y_true, ab_pred, name=None):
y_ = y_true[:, 0]
u_ = y_true[:, 1]
a_ = ab_pred[:, 0]
b_ = ab_pred[:, 1]
ya = (y_ + 1e-35) / a_
return -1 * k.mean(u_ * (k.log(b_) + b_ * k.log(ya)) - k.pow(ya, b_))
"""
Custom Keras activation function, outputs alpha neuron using exponentiation and beta using softplus
"""
def activate(ab):
a = k.exp(ab[:, 0])
b = k.softplus(ab[:, 1])
a = k.reshape(a, (k.shape(a)[0], 1))
b = k.reshape(b, (k.shape(b)[0], 1))
return k.concatenate((a, b), axis=1)
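# Quick standalone check (illustrative): exp keeps alpha strictly positive and
# softplus (log(1 + exp(x))) keeps beta strictly positive, as Weibull parameters require.
_alpha, _beta = np.exp(-2.0), np.log1p(np.exp(-2.0))
assert _alpha > 0 and _beta > 0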
"""
Load and parse engine data files into:
- an (engine/day, observed history, sensor readings) x tensor, where observed history is 100 days, zero-padded
for days that don't have a full 100 days of observed history (e.g., first observed day for an engine)
- an (engine/day, 2) tensor containing time-to-event and 1 (since all engines failed)
There are probably MUCH better ways of doing this, but I don't use Numpy that much, and the data parsing isn't the
point of this demo anyway.
"""
def load_file(name):
with open(name, 'r') as file:
return np.loadtxt(file, delimiter=',')
np.set_printoptions(suppress=True, threshold=10000)
train = load_file('train.csv')
test_x = load_file('test_x.csv')
test_y = load_file('test_y.csv')
# Combine the X values to normalize them, then split them back out
all_x = np.concatenate((train[:, 2:26], test_x[:, 2:26]))
all_x = normalize(all_x, axis=0)
train[:, 2:26] = all_x[0:train.shape[0], :]
test_x[:, 2:26] = all_x[train.shape[0]:, :]
# Make engine numbers and days zero-indexed, for everybody's sanity
train[:, 0:2] -= 1
test_x[:, 0:2] -= 1
# Configurable observation look-back period for each engine/day
max_time = 100
def build_data(engine, time, x, max_time, is_test):
# y[0] will be days remaining, y[1] will be event indicator, always 1 for this data
out_y = np.empty((0, 2), dtype=np.float32)
# A full history of sensor readings to date for each x
out_x = np.empty((0, max_time, 24), dtype=np.float32)
for i in range(100):
print("Engine: " + str(i))
# When did the engine fail? (Last day + 1 for train data, irrelevant for test.)
max_engine_time = int(np.max(time[engine == i])) + 1
if is_test:
start = max_engine_time - 1
else:
start = 0
this_x = np.empty((0, max_time, 24), dtype=np.float32)
for j in range(start, max_engine_time):
engine_x = x[engine == i]
out_y = np.append(out_y, np.array((max_engine_time - j, 1), ndmin=2), axis=0)
xtemp = np.zeros((1, max_time, 24))
xtemp[:, max_time-min(j, 99)-1:max_time, :] = engine_x[max(0, j-max_time+1):j+1, :]
this_x = np.concatenate((this_x, xtemp))
out_x = np.concatenate((out_x, this_x))
return out_x, out_y
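# Toy illustration of the zero-padding above (independent of the real data): for an
# engine with 3 observed days, 2 sensors, and a window of 5, row j keeps the last
# j+1 readings right-aligned and zero-pads the rest, matching the Masking layer below.
_toy_x = np.arange(6, dtype=np.float32).reshape(3, 2)
_toy_out = np.zeros((3, 5, 2), dtype=np.float32)
for _j in range(3):
    _toy_out[_j, 5 - (_j + 1):, :] = _toy_x[:_j + 1, :]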
train_x, train_y = build_data(train[:, 0], train[:, 1], train[:, 2:26], max_time, False)
test_x = build_data(test_x[:, 0], test_x[:, 1], test_x[:, 2:26], max_time, True)[0]
train_u = np.zeros((100, 1), dtype=np.float32)
train_u += 1
test_y = np.append(np.reshape(test_y, (100, 1)), train_u, axis=1)
"""
Here's the rest of the meat of the demo... actually fitting and training the model.
We'll also make some test predictions so we can evaluate model performance.
"""
# Start building our model
model = Sequential()
# Mask parts of the lookback period that are all zeros (i.e., unobserved) so they don't skew the model
model.add(Masking(mask_value=0., input_shape=(max_time, 24)))
# LSTM is just a common type of RNN. You could also try anything else (e.g., GRU).
model.add(LSTM(20, input_dim=24))
# We need 2 neurons to output Alpha and Beta parameters for our Weibull distribution
model.add(Dense(2))
# Apply the custom activation function mentioned above
model.add(Activation(activate))
# Use the discrete log-likelihood for Weibull survival data as our loss function
model.compile(loss=weibull_loglik_discrete, optimizer=RMSprop(lr=.001))
# Fit!
model.fit(train_x, train_y, nb_epoch=250, batch_size=2000, verbose=2, validation_data=(test_x, test_y))
# Make some predictions and put them alongside the real TTE and event indicator values
test_predict = model.predict(test_x)
test_predict = np.resize(test_predict, (100, 2))
test_result = np.concatenate((test_y, test_predict), axis=1)
# TTE, Event Indicator, Alpha, Beta
print(test_result)
|
daynebatten/keras-wtte-rnn
|
wtte-rnn.py
|
Python
|
mit
| 5,661
|
[
"NEURON"
] |
f432f2bb7968fc206f6e47021f0cd226778f5123609a498b22de8ff70d8dffd7
|
from .agents import *
import random
import utils as u
class AgentXTypeTwoClass(Agent):
def __init__(self, x=2, y=2):
Agent.__init__(self)
##
# Personalize the identifier of this class.
# Will be used instead of the class name
# in neighbours info
self.name = 'AgentXTypeTwo'
# The possible actions of the agent
self.actions = {
0: "GoNorth",
1: "GoWest",
2: "GoSouth",
3: "GoEast",
4: "NoOp"
}
# The list of walls bumped into
self.walls = []
# The list of visited positions
self.visited_floor = []
# The search tree
self.search_tree = [((0, 0), 4)]
# The positions visited by other agents
self.visited_floor_adv = []
# Current action
self.current_action = 4
# Current position
self.position = (0, 0)
def get_coord(action):
"""
Retrieve the normal coordinates and the backtracked one
Args:
- action (int): The action to make
Return:
- (tuple): The new position
"""
if action == 0: # GoNorth
return self.position[0], self.position[1] + 1
elif action == 1: # GoWest
return self.position[0] - 1, self.position[1]
elif action == 2: # GoSouth
return self.position[0], self.position[1] - 1
elif action == 3: # GoEast
return self.position[0] + 1, self.position[1]
# Defensive default for NoOp (or any unrecognised action): stay put
return self.position
def distance_from_other_agents(neighbors):
"""
Calculate the distance from other agents and return the list with the preferred action to make
Args:
neighbors (list): The complete list of the agent
Return:
(list): A list of tuple with a structure like [(distance, [action, ...]), ...]
"""
distances = []
for (agent_id, agent_type), pos in neighbors:
if self.id != agent_id:
dis_from_other_agent = u.distance(self.position, (self.position[0] + pos[0], self.position[1] + pos[1]))
actions = []
if pos[0] < 0:
actions.append(1) # GoWest
elif pos[0] > 0:
actions.append(3) # GoEast
if pos[1] < 0:
actions.append(2) # GoSouth
elif pos[1] > 0:
actions.append(0) # GoNorth
actions.append(random.randint(0, 3))
distances.append((dis_from_other_agent, actions))
return list(sorted(distances, key=lambda elm: elm[0]))
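# (Worked example, with hypothetical ids:) a single neighbour at relative
# offset (2, -1) yields one entry whose actions are [3, 2, <random 0-3>],
# i.e. GoEast, GoSouth, then a random fallback; entries come back sorted
# nearest-first.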
def define_action(neighbors):
"""
Retrieve the action to make. The agent first tries to open a new graph (or tree) branch;
if that is not possible, it re-enters a previously visited branch by backtracking
Args:
neighbors (list): The list of the neighbors
Return:
(string): the action to make
"""
def decide(action):
"""
Check whether the action is possible
Args:
action (int): The action to undertake
Return:
(string) The action to make
(None) If it is not possible
"""
coord = get_coord(action)
if coord not in self.walls and coord not in self.visited_floor \
and coord not in self.visited_floor_adv:
# New position
self.position = coord
# New action
self.current_action = action
# Save in the history
self.visited_floor.insert(0, self.position)
self.search_tree.insert(0, (self.position, action))
return self.actions[action]
else:
return None
dis_other_agents = distance_from_other_agents(neighbors)
for dis, actions in dis_other_agents:
# First, try the actions computed with the distance heuristic
for i in actions:
action = decide(i)
if action:
return action
# As a second stage, the agent tries each of the four actions in turn (when possible)
for i in range(0, 4):
action = decide(i)
if action:
return action
##
# ====================================================
# Backtracking when there are no actions left to make
# ====================================================
if not self.search_tree:
return 'NoOp'
# Retrieve the position and action
(coord_x, coord_y), action = self.search_tree[0]
# Calculate the backtrack action: (action + 2) % 4 maps each move to its
# opposite (GoNorth <-> GoSouth, GoWest <-> GoEast)
action = (action + 2) % 4
# Remove the first element of search tree
self.search_tree.pop(0)
# Backtrack position
self.position = get_coord(action)
# Backtrack action
self.current_action = action
return self.actions[action]
def retrieve_action(neighbors):
"""
Retrieve an action to make
Args:
neighbors (array): The list of the neighbors
Return:
(string): The action to make
"""
if neighbors:
return define_action(neighbors)
else:
return 'NoOp'
def make_action(status, bump, neighbors):
"""
Select the action to execute
Params:
status (string): 'Dirty' or 'Clean'
bump (string): 'Bump' or 'None'
neighbors (list of tuples): [
( (agent_id, agent_type), (r_x, r_y) ),
...,
...
]
Returns:
(string): one of these commands:
- 'Suck'
- 'GoNorth'
- 'GoSouth'
- 'GoWest'
- 'GoEast'
- 'NoOp' or 'Noop'
"""
# If the search tree is empty, then the agent has finished the visit
if not self.search_tree:
return 'NoOp'
# If the position is dirty, then suck
if status == 'Dirty':
return 'Suck'
# Bumped the wall
if bump == 'Bump':
# Remove the position from the search tree because it can't be accessed anymore
if self.search_tree:
self.search_tree.pop(0)
self.walls.append(self.position)
self.position = get_coord((self.current_action + 2) % 4)
# Whether the agent bumped a wall or the cell is simply clean, retrieve the next action to make
return retrieve_action(neighbors)
def program(status, bump, neighbors):
"""Main function of the Agent.
Params:
status (string): 'Dirty' or 'Clean'
bump (string): 'Bump' or 'None'
neighbors (list of tuples): [
( (agent_id, agent_type), (r_x, r_y) ),
...,
...
]
Returns:
(string): one of these commands:
- 'Suck'
- 'GoNorth'
- 'GoSouth'
- 'GoWest'
- 'GoEast'
- 'NoOp' or 'Noop'
"""
# Record every position visited by another agent as if it had been visited personally
for (agent_id, agent_type), pos in neighbors:
if agent_id != self.id:
self.visited_floor_adv.append((self.position[0] + pos[0], self.position[1] + pos[1]))
return make_action(status, bump, neighbors)
self.program = program
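# (Hypothetical usage sketch, assuming the Agent/environment contract implied
# above:) the environment instantiates the agent and repeatedly calls the
# installed program with its percepts, e.g.
# agent = AgentXTypeTwoClass()
# agent.program('Clean', 'None', []) # -> 'NoOp' (no neighbours)
# agent.program('Dirty', 'None', [((1, 'AgentXTypeTwo'), (2, -1))]) # -> 'Suck'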
|
DMIunipg/AI-Project-VacuumEnvironment
|
agent_dir/AgentXTypeTwo.py
|
Python
|
apache-2.0
| 8,697
|
[
"VisIt"
] |
b55fe66ea137497f377fa5213880d7a7fc7718990374358716699eea9d98bc25
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup, editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from __future__ import absolute_import
import numpy as np
from MDAnalysisTests import datafiles
from MDAnalysisTests.datafiles import (PDB_small, PDB, PDB_full, LAMMPSdata,
LAMMPSdata2, LAMMPSdcd2,
LAMMPSdata_mini, PSF_TRICLINIC,
DCD_TRICLINIC, PSF_NAMD_TRICLINIC,
DCD_NAMD_TRICLINIC)
class RefAdKSmall(object):
"""Mixin class to provide comparison numbers.
Based on small PDB with AdK (:data:`PDB_small`).
.. Note::
All distances must be in ANGSTROEM as this is the MDAnalysis
default unit. All readers must return Angstroem by default.
"""
filename = datafiles.PDB_small
ref_coordinates = {
# G11:CA, copied from adk_open.pdb
'A10CA': np.array([-1.198, 7.937, 22.654]),
}
ref_distances = {'endtoend': 11.016959}
ref_E151HA2_index = 2314
ref_n_atoms = 3341
ref_charmm_totalcharge = -4.0
ref_charmm_Hcharges = [0.33] + 203 * [0.31]
ref_charmm_ArgCAcharges = 13 * [0.07]
ref_charmm_ProNcharges = 10 * [-0.29]
ref_unitcell = np.array([80.017, 80.017, 80.017, 60., 60., 90.],
dtype=np.float32)
ref_volume = 0.0
class RefAdK(object):
"""Mixin class to provide comparison numbers.
Based on PDB/GRO with AdK in water + Na+ (:data:`PDB`).
.. Note::
All distances must be in ANGSTROEM as this is the MDAnalysis
default unit. All readers must return Angstroem by default.
"""
filename = datafiles.PDB
ref_coordinates = {
# Angstroem as MDAnalysis unit!!
'A10CA': np.array([62.97600174, 62.08800125, 20.2329998]),
}
ref_distances = {'endtoend': 9.3513174}
ref_E151HA2_index = 2314
ref_n_atoms = 47681
ref_Na_sel_size = 4
# CRYST1 80.017 80.017 80.017 60.00 60.00 90.00
ref_unitcell = np.array([80.017, 80.017, 80.017, 60., 60., 90.],
dtype=np.float32)
ref_volume = 362270.0 # computed with Gromacs
class Ref2r9r(object):
"""Mixin class to provide comparison numbers.
Based on S6 helices of chimeric Kv channel
.. Note::
All distances must be in ANGSTROEM as this is the MDAnalysis
default unit. All readers must return Angstroem by default.
"""
ref_n_atoms = 1284
ref_sum_centre_of_geometry = -98.24146
ref_n_frames = 10
class Ref4e43(object):
"""Mixin class for a clean Protein Databank PDB file"""
filename = datafiles.PDB_full
header = 'HYDROLASE 11-MAR-12 4E43'
title = ['HIV PROTEASE (PR) DIMER WITH ACETATE IN EXO SITE AND PEPTIDE '
'IN ACTIVE', '2 SITE']
compnd = ['MOL_ID: 1;',
'2 MOLECULE: PROTEASE;',
'3 CHAIN: A, B;',
'4 ENGINEERED: YES;',
'5 MUTATION: YES;',
'6 MOL_ID: 2;',
'7 MOLECULE: RANDOM PEPTIDE;',
'8 CHAIN: C;',
'9 ENGINEERED: YES;',
'10 OTHER_DETAILS: UNKNOWN IMPURITY', ]
num_remarks = 333
# only first 5 remarks for comparison
nmax_remarks = 5
remarks = [
'2',
'2 RESOLUTION. 1.54 ANGSTROMS.',
'3',
'3 REFINEMENT.',
'3 PROGRAM : REFMAC 5.5.0110',
]
class RefACHE(object):
"""Mixin class to provide comparison numbers.
ACHE peptide
# COM check in VMD::
set p [atomselect top "not water"]
set total {0 0 0};
for {set i 0} {$i < 11} {incr i} {
$p frame $i; set total [vecadd $total [measure center $p]]}
puts [vecsum $total]
# 472.2592159509659
"""
ref_n_atoms = 252
ref_proteinatoms = ref_n_atoms
ref_sum_centre_of_geometry = 472.2592159509659 # 430.44807815551758
ref_n_frames = 11
ref_periodic = False
class RefCappedAla(object):
"""Mixin class to provide comparison numbers.
Capped Ala in water
# COM check in VMD (load trajectory as *AMBER with periodic box*!)::
set p [atomselect top "not water"]
set total {0 0 0};
for {set i 0} {$i < 11} {incr i} {
$p frame $i; set total [vecadd $total [measure center $p]]}
puts [vecsum $total]
# 686.276834487915
"""
ref_n_atoms = 5071
ref_proteinatoms = 22
ref_sum_centre_of_geometry = 686.276834487915
ref_n_frames = 11
ref_periodic = True
class RefVGV(object):
"""Mixin class to provide comparison numbers.
Computed from bala.trj::
w = MDAnalysis.Universe(PRMncdf, TRJncdf)
ref_n_atoms = len(w.atoms)
ref_proteinatoms = len(w.select_atoms("protein"))
ref_sum_centre_of_geometry = np.sum([protein.center_of_geometry()
for ts in w.trajectory])
"""
topology = datafiles.PRMncdf
filename = datafiles.NCDF
ref_n_atoms = 2661
ref_proteinatoms = 50
ref_sum_centre_of_geometry = 1552.9125
ref_n_frames = 30
ref_periodic = True
class RefTZ2(object):
"""Reference values for the cpptraj testcase tz2.truncoct.nc
Used under the GPL v3.
"""
topology = datafiles.PRM7
filename = datafiles.NCDFtruncoct
ref_n_atoms = 5827
ref_proteinatoms = 217
ref_sum_centre_of_geometry = -68.575745
ref_n_frames = 10
ref_periodic = True
class RefTRZ(object):
# ref_coordinates = {}
# ref_distances = {'endtoend': }
ref_n_atoms = 8184
ref_dimensions = np.array([55.422830581665039, 55.422830581665039,
55.422830581665039, 90., 90., 90.],
dtype=np.float32)
ref_volume = 170241.762765
ref_n_frames = 6
ref_coordinates = np.array([72.3163681, -130.31130981, 19.97969055],
dtype=np.float32)
ref_velocities = np.array([[14.83297443, 18.02611542, 6.07733774]],
dtype=np.float32)
ref_delta = 0.001
ref_time = 0.01
ref_title = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ12345678901234'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ12345678901234')
class RefLAMMPSData(object):
filename = LAMMPSdata
n_atoms = 18364
pos_atom1 = np.array([11.89985657, 48.4455719, 19.09719849],
dtype=np.float32)
vel_atom1 = np.array([-0.005667593, 0.00791380978, -0.00300779533],
dtype=np.float32)
dimensions = np.array([55.42282867, 55.42282867, 55.42282867, 90., 90., 90.
],
dtype=np.float32)
class RefLAMMPSDataDCD(object):
format = "LAMMPS"
topology = LAMMPSdata2
trajectory = LAMMPSdcd2
n_atoms = 12421
n_frames = 5
dt = 0.5 # ps per frame
mean_dimensions = np.array(
[ 50.66186142, 47.18824387, 52.33762741,
90. , 90. , 90. ], dtype=np.float32)
class RefLAMMPSDataMini(object):
filename = LAMMPSdata_mini
n_atoms = 1
pos_atom1 = np.array([11.89985657, 48.4455719, 19.09719849],
dtype=np.float32)
vel_atom1 = np.array([-0.005667593, 0.00791380978, -0.00300779533],
dtype=np.float32)
dimensions = np.array([60., 50., 30., 90., 90., 90.], dtype=np.float32)
class RefCHARMMtriclinicDCD(object):
topology = PSF_TRICLINIC
trajectory = DCD_TRICLINIC
# time(ps) A B C alpha beta gamma (lengths in Angstroem, angles in degrees)
# dcd starts at t = 1ps
ref_dimensions = np.array([
[1., 35.44604, 35.06156, 34.1585, 91.32802, 61.73521, 44.40703],
[2., 34.65957, 34.22689, 33.09897, 90.56206, 61.79192, 44.14549],
[3., 34.52772, 34.66422, 33.53881, 90.55859, 63.11228, 40.14044],
[4., 34.43749, 33.38432, 34.02133, 88.82457, 64.98057, 36.77397],
[5., 33.73129, 32.47752, 34.18961, 89.88102, 65.89032, 36.10921],
[6., 33.78703, 31.90317, 34.98833, 90.03092, 66.12877, 35.07141],
[7., 33.24708, 31.18271, 34.9654, 93.11122, 68.17743, 35.73643],
[8., 32.92599, 30.31393, 34.99197, 93.89051, 69.3799, 33.48945],
[9., 32.15295, 30.43056, 34.96157, 96.01416, 71.50115, 32.56111],
[10., 31.99748, 30.21518, 35.24292, 95.85821, 71.08429, 31.85939]
])
class RefNAMDtriclinicDCD(object):
topology = PSF_NAMD_TRICLINIC
trajectory = DCD_NAMD_TRICLINIC
# vmd topology trajectory
# molinfo 0 get {a b c alpha beta gamma}
# time(ps) A B C alpha beta gamma (lengths in Angstroem, angles in degrees)
ref_dimensions = np.array([
[1., 38.426594, 38.393101, 44.759800, 90.000000, 90.000000, 60.028915],
])
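# (Illustrative sketch, not part of this module:) reader test cases elsewhere
# in the suite mix these classes in for their expected values, along the
# lines of:
# class TestPDBReader(RefAdKSmall, TestCase):
#     def test_n_atoms(self):
#         u = MDAnalysis.Universe(self.filename)
#         assert u.atoms.n_atoms == self.ref_n_atoms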
|
kain88-de/mdanalysis
|
testsuite/MDAnalysisTests/coordinates/reference.py
|
Python
|
gpl-2.0
| 9,750
|
[
"Amber",
"Gromacs",
"LAMMPS",
"MDAnalysis",
"VMD"
] |
8c8fcb56b298ee3fa1c39a7d6cd1416fa9ac72933e9da5df3fa11976ae7bb061
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# pubvgridprojects - CGI wrapper serving the public VGrid projects page
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
import cgi
import cgitb
cgitb.enable()
from shared.functionality.pubvgridprojs import main
from shared.cgiscriptstub import run_cgi_script_possibly_with_cert
run_cgi_script_possibly_with_cert(main)
|
heromod/migrid
|
mig/cgi-sid/pubvgridprojs.py
|
Python
|
gpl-2.0
| 1,155
|
[
"Brian"
] |
f4ac2e62a79e7d8ca26de19f4d2772d9c12fec7049f769e144c5735cb9f283b3
|
# -*- coding: utf-8 -*-
# TODO: Port to pytest
# PEP8 asserts
from copy import deepcopy
import httplib as http
import time
import mock
import pytest
from nose.tools import * # noqa
from tests.base import OsfTestCase, fake
from osf_tests.factories import (
UserFactory, NodeFactory, ProjectFactory,
AuthUserFactory
)
from addons.wiki.tests.factories import NodeWikiFactory
from website.exceptions import NodeStateError
from addons.wiki import settings
from addons.wiki import views
from addons.wiki.exceptions import InvalidVersionError
from addons.wiki.models import NodeWikiPage, render_content
from addons.wiki.utils import (
get_sharejs_uuid, generate_private_uuid, share_db, delete_share_doc,
migrate_uuid, format_wiki_version, serialize_wiki_settings,
)
from framework.auth import Auth
from framework.mongo.utils import to_mongo_key
from .config import EXAMPLE_DOCS, EXAMPLE_OPS
pytestmark = pytest.mark.django_db
# forward slashes are not allowed in page names (they would typically be replaced
# with spaces); SPECIAL_CHARACTERS_ALLOWED is SPECIAL_CHARACTERS_ALL without the '/'
SPECIAL_CHARACTERS_ALL = u'`~!@#$%^*()-=_+ []{}\|/?.df,;:''"'
SPECIAL_CHARACTERS_ALLOWED = u'`~!@#$%^*()-=_+ []{}\|?.df,;:''"'
class TestWikiViews(OsfTestCase):
def setUp(self):
super(TestWikiViews, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(is_public=True, creator=self.user)
self.consolidate_auth = Auth(user=self.project.creator)
def test_wiki_url_get_returns_200(self):
url = self.project.web_url_for('project_wiki_view', wname='home')
res = self.app.get(url)
assert_equal(res.status_code, 200)
def test_wiki_url_404_with_no_write_permission(self): # and not public
url = self.project.web_url_for('project_wiki_view', wname='somerandomid')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 404)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_wiki_deleted_404_with_no_write_permission(self, mock_sharejs):
self.project.update_node_wiki('funpage', 'Version 1', Auth(self.user))
self.project.save()
url = self.project.web_url_for('project_wiki_view', wname='funpage')
res = self.app.get(url)
assert_equal(res.status_code, 200)
delete_url = self.project.api_url_for('project_wiki_delete', wname='funpage')
self.app.delete(delete_url, auth=self.user.auth)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 404)
def test_wiki_url_with_path_get_returns_200(self):
self.project.update_node_wiki('funpage', 'Version 1', Auth(self.user))
self.project.update_node_wiki('funpage', 'Version 2', Auth(self.user))
self.project.save()
url = self.project.web_url_for(
'project_wiki_view',
wname='funpage',
) + '?view&compare=1&edit'
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_wiki_url_with_edit_get_returns_403_with_no_write_permission(self):
self.project.update_node_wiki('funpage', 'Version 1', Auth(self.user))
self.project.update_node_wiki('funpage', 'Version 2', Auth(self.user))
self.project.save()
url = self.project.web_url_for(
'project_wiki_view',
wname='funpage',
compare=1,
)
res = self.app.get(url)
assert_equal(res.status_code, 200)
url = self.project.web_url_for(
'project_wiki_view',
wname='funpage',
) + '?edit'
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 403)
# Check publicly editable
wiki = self.project.get_addon('wiki')
wiki.set_editing(permissions=True, auth=self.consolidate_auth, log=True)
res = self.app.get(url, auth=AuthUserFactory().auth, expect_errors=False)
assert_equal(res.status_code, 200)
# Check publicly editable but not logged in
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_wiki_url_for_pointer_returns_200(self):
# TODO: explain how this tests a pointer
project = ProjectFactory(is_public=True)
self.project.add_pointer(project, Auth(self.project.creator), save=True)
url = self.project.web_url_for('project_wiki_view', wname='home')
res = self.app.get(url)
assert_equal(res.status_code, 200)
@pytest.mark.skip('#TODO: Fix or mock mongodb for sharejs')
def test_wiki_draft_returns_200(self):
url = self.project.api_url_for('wiki_page_draft', wname='somerandomid')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_wiki_content_returns_200(self):
url = self.project.api_url_for('wiki_page_content', wname='somerandomid')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
@mock.patch('addons.wiki.models.NodeWikiPage.rendered_before_update', new_callable=mock.PropertyMock)
def test_wiki_content_rendered_before_update(self, mock_rendered_before_update):
content = 'Some content'
self.project.update_node_wiki('somerandomid', content, Auth(self.user))
self.project.save()
mock_rendered_before_update.return_value = True
url = self.project.api_url_for('wiki_page_content', wname='somerandomid')
res = self.app.get(url, auth=self.user.auth)
assert_true(res.json['rendered_before_update'])
mock_rendered_before_update.return_value = False
res = self.app.get(url, auth=self.user.auth)
assert_false(res.json['rendered_before_update'])
def test_wiki_url_for_component_returns_200(self):
component = NodeFactory(parent=self.project, is_public=True)
url = component.web_url_for('project_wiki_view', wname='home')
res = self.app.get(url)
assert_equal(res.status_code, 200)
def test_project_wiki_edit_post(self):
self.project.update_node_wiki(
'home',
content='old content',
auth=Auth(self.project.creator)
)
url = self.project.web_url_for('project_wiki_edit_post', wname='home')
res = self.app.post(url, {'content': 'new content'}, auth=self.user.auth).follow()
assert_equal(res.status_code, 200)
self.project.reload()
# page was updated with new content
new_wiki = self.project.get_wiki_page('home')
assert_equal(new_wiki.content, 'new content')
def test_project_wiki_edit_post_with_new_wname_and_no_content(self):
# note: forward slashes not allowed in page_name
page_name = fake.catch_phrase().replace('/', ' ')
old_wiki_page_count = NodeWikiPage.find().count()
url = self.project.web_url_for('project_wiki_edit_post', wname=page_name)
# User submits to edit form with no content
res = self.app.post(url, {'content': ''}, auth=self.user.auth).follow()
assert_equal(res.status_code, 200)
new_wiki_page_count = NodeWikiPage.find().count()
# A new wiki page was created in the db
assert_equal(new_wiki_page_count, old_wiki_page_count + 1)
# Node now has the new wiki page associated with it
self.project.reload()
new_page = self.project.get_wiki_page(page_name)
assert_is_not_none(new_page)
def test_project_wiki_edit_post_with_new_wname_and_content(self):
# note: forward slashes not allowed in page_name
page_name = fake.catch_phrase().replace('/', ' ')
page_content = fake.bs()
old_wiki_page_count = NodeWikiPage.find().count()
url = self.project.web_url_for('project_wiki_edit_post', wname=page_name)
# User submits to edit form with content
res = self.app.post(url, {'content': page_content}, auth=self.user.auth).follow()
assert_equal(res.status_code, 200)
new_wiki_page_count = NodeWikiPage.find().count()
# A new wiki page was created in the db
assert_equal(new_wiki_page_count, old_wiki_page_count + 1)
# Node now has the new wiki page associated with it
self.project.reload()
new_page = self.project.get_wiki_page(page_name)
assert_is_not_none(new_page)
# content was set
assert_equal(new_page.content, page_content)
def test_project_wiki_edit_post_with_non_ascii_title(self):
# regression test for https://github.com/CenterForOpenScience/openscienceframework.org/issues/1040
# wname doesn't exist in the db, so it will be created
new_wname = u'øˆ∆´ƒøßå√ß'
url = self.project.web_url_for('project_wiki_edit_post', wname=new_wname)
res = self.app.post(url, {'content': 'new content'}, auth=self.user.auth).follow()
assert_equal(res.status_code, 200)
self.project.reload()
wiki = self.project.get_wiki_page(new_wname)
assert_equal(wiki.page_name, new_wname)
# updating content should return correct url as well.
res = self.app.post(url, {'content': 'updated content'}, auth=self.user.auth).follow()
assert_equal(res.status_code, 200)
def test_project_wiki_edit_post_with_special_characters(self):
new_wname = 'title: ' + SPECIAL_CHARACTERS_ALLOWED
new_wiki_content = 'content: ' + SPECIAL_CHARACTERS_ALL
url = self.project.web_url_for('project_wiki_edit_post', wname=new_wname)
res = self.app.post(url, {'content': new_wiki_content}, auth=self.user.auth).follow()
assert_equal(res.status_code, 200)
self.project.reload()
wiki = self.project.get_wiki_page(new_wname)
assert_equal(wiki.page_name, new_wname)
assert_equal(wiki.content, new_wiki_content)
assert_equal(res.status_code, 200)
def test_wiki_edit_get_home(self):
url = self.project.web_url_for('project_wiki_view', wname='home')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_project_wiki_view_scope(self):
self.project.update_node_wiki('home', 'Version 1', Auth(self.user))
self.project.update_node_wiki('home', 'Version 2', Auth(self.user))
self.project.save()
url = self.project.web_url_for('project_wiki_view', wname='home', view=2)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
url = self.project.web_url_for('project_wiki_view', wname='home', view=3)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
url = self.project.web_url_for('project_wiki_view', wname='home', view=0)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_project_wiki_compare_returns_200(self):
self.project.update_node_wiki('home', 'updated content', Auth(self.user))
self.project.save()
url = self.project.web_url_for('project_wiki_view', wname='home') + '?compare'
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_project_wiki_compare_scope(self):
self.project.update_node_wiki('home', 'Version 1', Auth(self.user))
self.project.update_node_wiki('home', 'Version 2', Auth(self.user))
self.project.save()
url = self.project.web_url_for('project_wiki_view', wname='home', compare=2)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
url = self.project.web_url_for('project_wiki_view', wname='home', compare=3)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
url = self.project.web_url_for('project_wiki_view', wname='home', compare=0)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_wiki_page_creation_strips_whitespace(self):
# Regression test for:
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/1080
# wname has a trailing space
url = self.project.web_url_for('project_wiki_view', wname='cupcake ')
res = self.app.post(url, {'content': 'blah'}, auth=self.user.auth).follow()
assert_equal(res.status_code, 200)
self.project.reload()
wiki = self.project.get_wiki_page('cupcake')
assert_is_not_none(wiki)
def test_wiki_validate_name(self):
url = self.project.api_url_for('project_wiki_validate_name', wname='Capslock')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_wiki_validate_name_creates_blank_page(self):
url = self.project.api_url_for('project_wiki_validate_name', wname='newpage', auth=self.consolidate_auth)
self.app.get(url, auth=self.user.auth)
self.project.reload()
assert_in('newpage', self.project.wiki_pages_current)
def test_wiki_validate_name_collision_doesnt_clear(self):
self.project.update_node_wiki('oldpage', 'some text', self.consolidate_auth)
url = self.project.api_url_for('project_wiki_validate_name', wname='oldpage', auth=self.consolidate_auth)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 409)
url = self.project.api_url_for('wiki_page_content', wname='oldpage', auth=self.consolidate_auth)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.json['wiki_content'], 'some text')
def test_wiki_validate_name_cannot_create_home(self):
url = self.project.api_url_for('project_wiki_validate_name', wname='home')
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 409)
def test_project_wiki_validate_name_mixed_casing(self):
url = self.project.api_url_for('project_wiki_validate_name', wname='CaPsLoCk')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_not_in('capslock', self.project.wiki_pages_current)
self.project.update_node_wiki('CaPsLoCk', 'hello', self.consolidate_auth)
assert_in('capslock', self.project.wiki_pages_current)
def test_project_wiki_validate_name_display_correct_capitalization(self):
url = self.project.api_url_for('project_wiki_validate_name', wname='CaPsLoCk')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_in('CaPsLoCk', res)
def test_project_wiki_validate_name_conflict_different_casing(self):
url = self.project.api_url_for('project_wiki_validate_name', wname='CAPSLOCK')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.update_node_wiki('CaPsLoCk', 'hello', self.consolidate_auth)
assert_in('capslock', self.project.wiki_pages_current)
url = self.project.api_url_for('project_wiki_validate_name', wname='capslock')
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 409)
def test_project_dashboard_shows_no_wiki_content_text(self):
# Regression test for:
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/1104
project = ProjectFactory(creator=self.user)
url = project.web_url_for('view_project')
res = self.app.get(url, auth=self.user.auth)
assert_in('No wiki content', res)
def test_project_dashboard_wiki_wname_get_shows_non_ascii_characters(self):
# Regression test for:
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/1104
text = u'你好'
self.project.update_node_wiki('home', text, Auth(self.user))
# can view wiki preview from project dashboard
url = self.project.web_url_for('view_project')
res = self.app.get(url, auth=self.user.auth)
assert_in(text, res)
def test_project_wiki_home_api_route(self):
url = self.project.api_url_for('project_wiki_home')
res = self.app.get(url, auth=self.user.auth)
assert_equals(res.status_code, 302)
# TODO: should this route exist? it redirects you to the web_url_for, not api_url_for.
# page_url = self.project.api_url_for('project_wiki_view', wname='home')
# assert_in(page_url, res.location)
def test_project_wiki_home_web_route(self):
page_url = self.project.web_url_for('project_wiki_view', wname='home', _guid=True)
url = self.project.web_url_for('project_wiki_home')
res = self.app.get(url, auth=self.user.auth)
assert_equals(res.status_code, 302)
assert_in(page_url, res.location)
def test_wiki_id_url_get_returns_302_and_resolves(self):
name = 'page by id'
self.project.update_node_wiki(name, 'some content', Auth(self.project.creator))
page = self.project.get_wiki_page(name)
page_url = self.project.web_url_for('project_wiki_view', wname=page.page_name, _guid=True)
url = self.project.web_url_for('project_wiki_id_page', wid=page._primary_key, _guid=True)
res = self.app.get(url)
assert_equal(res.status_code, 302)
assert_in(page_url, res.location)
res = res.follow()
assert_equal(res.status_code, 200)
assert_in(page_url, res.request.url)
def test_wiki_id_url_get_returns_404(self):
url = self.project.web_url_for('project_wiki_id_page', wid='12345', _guid=True)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 404)
def test_home_is_capitalized_in_web_view(self):
url = self.project.web_url_for('project_wiki_home', wid='home', _guid=True)
res = self.app.get(url, auth=self.user.auth).follow(auth=self.user.auth)
page_name_elem = res.html.find('span', {'id': 'pageName'})
assert_in('Home', page_name_elem.text)
def test_wiki_widget_no_content(self):
url = self.project.api_url_for('wiki_widget', wid='home')
res = self.app.get(url, auth=self.user.auth)
assert_is_none(res.json['wiki_content'])
def test_wiki_widget_short_content_no_cutoff(self):
short_content = 'a' * 150
self.project.update_node_wiki('home', short_content, Auth(self.user))
url = self.project.api_url_for('wiki_widget', wid='home')
res = self.app.get(url, auth=self.user.auth)
assert_in(short_content, res.json['wiki_content'])
assert_not_in('...', res.json['wiki_content'])
assert_false(res.json['more'])
def test_wiki_widget_long_content_cutoff(self):
long_content = 'a' * 600
self.project.update_node_wiki('home', long_content, Auth(self.user))
url = self.project.api_url_for('wiki_widget', wid='home')
res = self.app.get(url, auth=self.user.auth)
assert_less(len(res.json['wiki_content']), 520) # wiggle room for closing tags
assert_in('...', res.json['wiki_content'])
assert_true(res.json['more'])
def test_wiki_widget_with_multiple_short_pages_has_more(self):
project = ProjectFactory(is_public=True, creator=self.user)
short_content = 'a' * 150
project.update_node_wiki('home', short_content, Auth(self.user))
project.update_node_wiki('andanotherone', short_content, Auth(self.user))
url = project.api_url_for('wiki_widget', wid='home')
res = self.app.get(url, auth=self.user.auth)
assert_true(res.json['more'])
@mock.patch('addons.wiki.models.NodeWikiPage.rendered_before_update', new_callable=mock.PropertyMock)
def test_wiki_widget_rendered_before_update(self, mock_rendered_before_update):
# New pages use js renderer
mock_rendered_before_update.return_value = False
self.project.update_node_wiki('home', 'updated content', Auth(self.user))
url = self.project.api_url_for('wiki_widget', wid='home')
res = self.app.get(url, auth=self.user.auth)
assert_false(res.json['rendered_before_update'])
# Old pages use a different version of js render
mock_rendered_before_update.return_value = True
res = self.app.get(url, auth=self.user.auth)
assert_true(res.json['rendered_before_update'])
def test_read_only_users_cannot_view_edit_pane(self):
url = self.project.web_url_for('project_wiki_view', wname='home')
# No write permissions
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_not_in('data-osf-panel="Edit"', res.text)
# Write permissions
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_in('data-osf-panel="Edit"', res.text)
# Publicly editable
wiki = self.project.get_addon('wiki')
wiki.set_editing(permissions=True, auth=self.consolidate_auth, log=True)
res = self.app.get(url, auth=AuthUserFactory().auth)
assert_equal(res.status_code, 200)
assert_in('data-osf-panel="Edit"', res.text)
# Publicly editable but not logged in
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_not_in('data-osf-panel="Edit"', res.text)
class TestViewHelpers(OsfTestCase):
def setUp(self):
super(TestViewHelpers, self).setUp()
self.project = ProjectFactory()
self.wname = 'New page'
self.project.update_node_wiki(self.wname, 'some content', Auth(self.project.creator))
def test_get_wiki_web_urls(self):
urls = views._get_wiki_web_urls(self.project, self.wname)
assert_equal(urls['base'], self.project.web_url_for('project_wiki_home', _guid=True))
assert_equal(urls['edit'], self.project.web_url_for('project_wiki_view', wname=self.wname, _guid=True))
assert_equal(urls['home'], self.project.web_url_for('project_wiki_home', _guid=True))
assert_equal(urls['page'], self.project.web_url_for('project_wiki_view', wname=self.wname, _guid=True))
def test_get_wiki_api_urls(self):
urls = views._get_wiki_api_urls(self.project, self.wname)
assert_equal(urls['base'], self.project.api_url_for('project_wiki_home'))
assert_equal(urls['delete'], self.project.api_url_for('project_wiki_delete', wname=self.wname))
assert_equal(urls['rename'], self.project.api_url_for('project_wiki_rename', wname=self.wname))
assert_equal(urls['content'], self.project.api_url_for('wiki_page_content', wname=self.wname))
assert_equal(urls['settings'], self.project.api_url_for('edit_wiki_settings'))
class TestWikiDelete(OsfTestCase):
def setUp(self):
super(TestWikiDelete, self).setUp()
creator = AuthUserFactory()
self.project = ProjectFactory(is_public=True, creator=creator)
self.consolidate_auth = Auth(user=self.project.creator)
self.auth = creator.auth
self.project.update_node_wiki('Elephants', 'Hello Elephants', self.consolidate_auth)
self.project.update_node_wiki('Lions', 'Hello Lions', self.consolidate_auth)
self.elephant_wiki = self.project.get_wiki_page('Elephants')
self.lion_wiki = self.project.get_wiki_page('Lions')
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_project_wiki_delete(self, mock_sharejs):
assert_in('elephants', self.project.wiki_pages_current)
url = self.project.api_url_for(
'project_wiki_delete',
wname='elephants'
)
self.app.delete(
url,
auth=self.auth
)
self.project.reload()
assert_not_in('elephants', self.project.wiki_pages_current)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_project_wiki_delete_w_valid_special_characters(self, mock_sharejs):
# TODO: Need to understand why calling update_node_wiki with failure causes transaction rollback issue later
# with assert_raises(NameInvalidError):
# self.project.update_node_wiki(SPECIAL_CHARACTERS_ALL, 'Hello Special Characters', self.consolidate_auth)
self.project.update_node_wiki(SPECIAL_CHARACTERS_ALLOWED, 'Hello Special Characters', self.consolidate_auth)
self.special_characters_wiki = self.project.get_wiki_page(SPECIAL_CHARACTERS_ALLOWED)
assert_in(to_mongo_key(SPECIAL_CHARACTERS_ALLOWED), self.project.wiki_pages_current)
url = self.project.api_url_for(
'project_wiki_delete',
wname=SPECIAL_CHARACTERS_ALLOWED
)
self.app.delete(
url,
auth=self.auth
)
self.project.reload()
assert_not_in(to_mongo_key(SPECIAL_CHARACTERS_ALLOWED), self.project.wiki_pages_current)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_wiki_versions_do_not_reappear_after_delete(self, mock_sharejs):
# Creates a wiki page
self.project.update_node_wiki('Hippos', 'Hello hippos', self.consolidate_auth)
# Edits it two times
assert_equal(len(self.project.wiki_pages_versions['hippos']), 1)
self.project.update_node_wiki('Hippos', 'Hello hippopotamus', self.consolidate_auth)
assert_equal(len(self.project.wiki_pages_versions['hippos']), 2)
# Deletes the wiki page
self.project.delete_node_wiki('Hippos', self.consolidate_auth)
assert_true('hippos' not in self.project.wiki_pages_versions)
# Creates new wiki with same name
self.project.update_node_wiki('Hippos', 'Hello again hippos', self.consolidate_auth)
assert_equal(len(self.project.wiki_pages_versions['hippos']), 1)
self.project.update_node_wiki('Hippos', 'Hello again hippopotamus', self.consolidate_auth)
assert_equal(len(self.project.wiki_pages_versions['hippos']), 2)
class TestWikiRename(OsfTestCase):
def setUp(self):
super(TestWikiRename, self).setUp()
creator = AuthUserFactory()
self.project = ProjectFactory(is_public=True, creator=creator)
self.consolidate_auth = Auth(user=self.project.creator)
self.auth = creator.auth
self.project.update_node_wiki('home', 'Hello world', self.consolidate_auth)
self.page_name = 'page2'
self.project.update_node_wiki(self.page_name, 'content', self.consolidate_auth)
self.project.save()
self.page = self.project.get_wiki_page(self.page_name)
self.wiki = self.project.get_wiki_page('home')
self.url = self.project.api_url_for(
'project_wiki_rename',
wname=self.page_name,
)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_rename_wiki_page_valid(self, mock_sharejs, new_name=u'away'):
self.app.put_json(
self.url,
{'value': new_name},
auth=self.auth
)
self.project.reload()
old_wiki = self.project.get_wiki_page(self.page_name)
assert_false(old_wiki)
new_wiki = self.project.get_wiki_page(new_name)
assert_true(new_wiki)
assert_equal(new_wiki._primary_key, self.page._primary_key)
assert_equal(new_wiki.content, self.page.content)
assert_equal(new_wiki.version, self.page.version)
def test_rename_wiki_page_invalid(self, new_name=u'invalid/name'):
res = self.app.put_json(
self.url,
{'value': new_name},
auth=self.auth,
expect_errors=True,
)
assert_equal(http.BAD_REQUEST, res.status_code)
assert_equal(res.json['message_short'], 'Invalid name')
assert_equal(res.json['message_long'], 'Page name cannot contain forward slashes.')
self.project.reload()
old_wiki = self.project.get_wiki_page(self.page_name)
assert_true(old_wiki)
def test_rename_wiki_page_duplicate(self):
self.project.update_node_wiki('away', 'Hello world', self.consolidate_auth)
new_name = 'away'
res = self.app.put_json(
self.url,
{'value': new_name},
auth=self.auth,
expect_errors=True,
)
assert_equal(res.status_code, 409)
def test_rename_wiki_name_not_found(self):
url = self.project.api_url_for('project_wiki_rename', wname='not_found_page_name')
res = self.app.put_json(url, {'value': 'new name'},
auth=self.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_cannot_rename_wiki_page_to_home(self):
user = AuthUserFactory()
# A fresh project where the 'home' wiki page has no content
project = ProjectFactory(creator=user)
project.update_node_wiki('Hello', 'hello world', Auth(user=user))
url = project.api_url_for('project_wiki_rename', wname=to_mongo_key('Hello'))
res = self.app.put_json(url, {'value': 'home'}, auth=user.auth, expect_errors=True)
assert_equal(res.status_code, 409)
def test_rename_wiki_name_with_value_missing(self):
# value is missing
res = self.app.put_json(self.url, {}, auth=self.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_rename_wiki_page_duplicate_different_casing(self):
# attempt to rename 'page2' from setup to different case of 'away'.
old_name = 'away'
new_name = 'AwAy'
self.project.update_node_wiki(old_name, 'Hello world', self.consolidate_auth)
res = self.app.put_json(
self.url,
{'value': new_name},
auth=self.auth,
expect_errors=True
)
assert_equal(res.status_code, 409)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_rename_wiki_page_same_name_different_casing(self, mock_sharejs):
old_name = 'away'
new_name = 'AWAY'
self.project.update_node_wiki(old_name, 'Hello world', self.consolidate_auth)
url = self.project.api_url_for('project_wiki_rename', wname=old_name)
res = self.app.put_json(
url,
{'value': new_name},
auth=self.auth,
expect_errors=False
)
assert_equal(res.status_code, 200)
def test_cannot_rename_home_page(self):
url = self.project.api_url_for('project_wiki_rename', wname='home')
res = self.app.put_json(url, {'value': 'homelol'}, auth=self.auth, expect_errors=True)
assert_equal(res.status_code, 400)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_can_rename_to_a_deleted_page(self, mock_sharejs):
self.project.delete_node_wiki(self.page_name, self.consolidate_auth)
self.project.save()
# Creates a new page
self.project.update_node_wiki('page3', 'moarcontent', self.consolidate_auth)
self.project.save()
# Renames the wiki to the deleted page
url = self.project.api_url_for('project_wiki_rename', wname='page3')
res = self.app.put_json(url, {'value': self.page_name}, auth=self.auth)
assert_equal(res.status_code, 200)
def test_rename_wiki_page_with_valid_html(self):
# script is not an issue since data is sanitized via bleach or mako before display.
self.test_rename_wiki_page_valid(new_name=u'<html>hello<html>')
def test_rename_wiki_page_with_invalid_html(self):
# script is not an issue since data is sanitized via bleach or mako before display.
# that said, routes still do not accept forward slashes
self.test_rename_wiki_page_invalid(new_name=u'<html>hello</html>')
def test_rename_wiki_page_with_non_ascii_title(self):
self.test_rename_wiki_page_valid(new_name=u'øˆ∆´ƒøßå√ß')
def test_rename_wiki_page_with_valid_special_character_title(self):
self.test_rename_wiki_page_valid(new_name=SPECIAL_CHARACTERS_ALLOWED)
def test_rename_wiki_page_with_invalid_special_character_title(self):
self.test_rename_wiki_page_invalid(new_name=SPECIAL_CHARACTERS_ALL)
class TestWikiLinks(OsfTestCase):
def test_links(self):
user = AuthUserFactory()
project = ProjectFactory(creator=user)
wiki = NodeWikiFactory(
content='[[wiki2]]',
user=user,
node=project,
)
assert_in(
'/{}/wiki/wiki2/'.format(project._id),
wiki.html(project),
)
# Regression test for https://sentry.osf.io/osf/production/group/310/
def test_bad_links(self):
content = u'<span></span><iframe src="http://httpbin.org/"></iframe>'
node = ProjectFactory()
wiki = NodeWikiFactory(content=content, node=node)
expected = render_content(content, node)
assert_equal(expected, wiki.html(node))
class TestWikiUuid(OsfTestCase):
def setUp(self):
super(TestWikiUuid, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(is_public=True, creator=self.user)
self.wname = 'foo.bar'
self.wkey = to_mongo_key(self.wname)
def test_uuid_generated_once(self):
assert_is_none(self.project.wiki_private_uuids.get(self.wkey))
url = self.project.web_url_for('project_wiki_view', wname=self.wname)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.reload()
private_uuid = self.project.wiki_private_uuids.get(self.wkey)
assert_true(private_uuid)
assert_not_in(private_uuid, res.body)
assert_in(get_sharejs_uuid(self.project, self.wname), res.body)
# Revisit page; uuid has not changed
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.reload()
assert_equal(private_uuid, self.project.wiki_private_uuids.get(self.wkey))
def test_uuid_not_visible_without_write_permission(self):
self.project.update_node_wiki(self.wname, 'some content', Auth(self.user))
self.project.save()
assert_is_none(self.project.wiki_private_uuids.get(self.wkey))
url = self.project.web_url_for('project_wiki_view', wname=self.wname)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.reload()
private_uuid = self.project.wiki_private_uuids.get(self.wkey)
assert_true(private_uuid)
assert_not_in(private_uuid, res.body)
assert_in(get_sharejs_uuid(self.project, self.wname), res.body)
# Users without write permission should not be able to access
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_not_in(get_sharejs_uuid(self.project, self.wname), res.body)
def test_uuid_not_generated_without_write_permission(self):
self.project.update_node_wiki(self.wname, 'some content', Auth(self.user))
self.project.save()
assert_is_none(self.project.wiki_private_uuids.get(self.wkey))
url = self.project.web_url_for('project_wiki_view', wname=self.wname)
res = self.app.get(url)
assert_equal(res.status_code, 200)
self.project.reload()
private_uuid = self.project.wiki_private_uuids.get(self.wkey)
assert_is_none(private_uuid)
def test_uuids_differ_between_pages(self):
wname1 = 'foo.bar'
url1 = self.project.web_url_for('project_wiki_view', wname=wname1)
res1 = self.app.get(url1, auth=self.user.auth)
assert_equal(res1.status_code, 200)
wname2 = 'bar.baz'
url2 = self.project.web_url_for('project_wiki_view', wname=wname2)
res2 = self.app.get(url2, auth=self.user.auth)
assert_equal(res2.status_code, 200)
self.project.reload()
uuid1 = get_sharejs_uuid(self.project, wname1)
uuid2 = get_sharejs_uuid(self.project, wname2)
assert_not_equal(uuid1, uuid2)
assert_in(uuid1, res1)
assert_in(uuid2, res2)
assert_not_in(uuid1, res2)
assert_not_in(uuid2, res1)
def test_uuids_differ_between_forks(self):
url = self.project.web_url_for('project_wiki_view', wname=self.wname)
project_res = self.app.get(url, auth=self.user.auth)
assert_equal(project_res.status_code, 200)
self.project.reload()
fork = self.project.fork_node(Auth(self.user))
assert_true(fork.is_fork_of(self.project))
fork_url = fork.web_url_for('project_wiki_view', wname=self.wname)
fork_res = self.app.get(fork_url, auth=self.user.auth)
assert_equal(fork_res.status_code, 200)
fork.reload()
# uuids are not copied over to forks
assert_not_equal(
self.project.wiki_private_uuids.get(self.wkey),
fork.wiki_private_uuids.get(self.wkey)
)
project_uuid = get_sharejs_uuid(self.project, self.wname)
fork_uuid = get_sharejs_uuid(fork, self.wname)
assert_not_equal(project_uuid, fork_uuid)
assert_in(project_uuid, project_res)
assert_in(fork_uuid, fork_res)
assert_not_in(project_uuid, fork_res)
assert_not_in(fork_uuid, project_res)
@pytest.mark.skip('#TODO: Fix or mock mongodb for sharejs')
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_migration_does_not_affect_forks(self, mock_sharejs):
original_uuid = generate_private_uuid(self.project, self.wname)
self.project.update_node_wiki(self.wname, 'Hello world', Auth(self.user))
fork = self.project.fork_node(Auth(self.user))
assert_equal(fork.wiki_private_uuids.get(self.wkey), None)
migrate_uuid(self.project, self.wname)
assert_not_equal(original_uuid, self.project.wiki_private_uuids.get(self.wkey))
assert_equal(fork.wiki_private_uuids.get(self.wkey), None)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_uuid_persists_after_delete(self, mock_sharejs):
assert_is_none(self.project.wiki_private_uuids.get(self.wkey))
# Create wiki page
self.project.update_node_wiki(self.wname, 'Hello world', Auth(self.user))
# Visit wiki edit page
edit_url = self.project.web_url_for('project_wiki_view', wname=self.wname)
res = self.app.get(edit_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.reload()
original_private_uuid = self.project.wiki_private_uuids.get(self.wkey)
original_sharejs_uuid = get_sharejs_uuid(self.project, self.wname)
# Delete wiki
delete_url = self.project.api_url_for('project_wiki_delete', wname=self.wname)
res = self.app.delete(delete_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.reload()
assert_equal(original_private_uuid, self.project.wiki_private_uuids.get(self.wkey))
# Revisit wiki edit page
res = self.app.get(edit_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.reload()
assert_equal(original_private_uuid, self.project.wiki_private_uuids.get(self.wkey))
assert_in(original_sharejs_uuid, res.body)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_uuid_persists_after_rename(self, mock_sharejs):
new_wname = 'bar.baz'
new_wkey = to_mongo_key(new_wname)
assert_is_none(self.project.wiki_private_uuids.get(self.wkey))
assert_is_none(self.project.wiki_private_uuids.get(new_wkey))
# Create wiki page
self.project.update_node_wiki(self.wname, 'Hello world', Auth(self.user))
wiki_page = self.project.get_wiki_page(self.wname)
# Visit wiki edit page
original_edit_url = self.project.web_url_for('project_wiki_view', wname=self.wname)
res = self.app.get(original_edit_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.reload()
original_private_uuid = self.project.wiki_private_uuids.get(self.wkey)
original_sharejs_uuid = get_sharejs_uuid(self.project, self.wname)
# Rename wiki
rename_url = self.project.api_url_for('project_wiki_rename', wname=self.wname)
res = self.app.put_json(
rename_url,
{'value': new_wname, 'pk': wiki_page._id},
auth=self.user.auth,
)
assert_equal(res.status_code, 200)
self.project.reload()
assert_is_none(self.project.wiki_private_uuids.get(self.wkey))
assert_equal(original_private_uuid, self.project.wiki_private_uuids.get(new_wkey))
# Revisit original wiki edit page
res = self.app.get(original_edit_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.reload()
assert_not_equal(original_private_uuid, self.project.wiki_private_uuids.get(self.wkey))
assert_not_in(original_sharejs_uuid, res.body)
@pytest.mark.skip('#TODO: Fix or mock mongodb for sharejs')
class TestWikiShareJSMongo(OsfTestCase):
@classmethod
def setUpClass(cls):
super(TestWikiShareJSMongo, cls).setUpClass()
cls._original_sharejs_db_name = settings.SHAREJS_DB_NAME
settings.SHAREJS_DB_NAME = 'sharejs_test'
def setUp(self):
super(TestWikiShareJSMongo, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(is_public=True, creator=self.user)
self.wname = 'foo.bar'
self.wkey = to_mongo_key(self.wname)
self.private_uuid = generate_private_uuid(self.project, self.wname)
self.sharejs_uuid = get_sharejs_uuid(self.project, self.wname)
# Create wiki page
self.project.update_node_wiki(self.wname, 'Hello world', Auth(self.user))
self.wiki_page = self.project.get_wiki_page(self.wname)
# Insert mongo data for current project/wiki
self.db = share_db()
example_uuid = EXAMPLE_DOCS[0]['_id']
self.example_docs = deepcopy(EXAMPLE_DOCS)
self.example_docs[0]['_id'] = self.sharejs_uuid
self.db.docs.insert(self.example_docs)
self.example_ops = deepcopy(EXAMPLE_OPS)
for item in self.example_ops:
item['_id'] = item['_id'].replace(example_uuid, self.sharejs_uuid)
item['name'] = item['name'].replace(example_uuid, self.sharejs_uuid)
self.db.docs_ops.insert(self.example_ops)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_migrate_uuid(self, mock_sharejs):
migrate_uuid(self.project, self.wname)
assert_is_none(self.db.docs.find_one({'_id': self.sharejs_uuid}))
assert_is_none(self.db.docs_ops.find_one({'name': self.sharejs_uuid}))
new_sharejs_uuid = get_sharejs_uuid(self.project, self.wname)
assert_equal(
EXAMPLE_DOCS[0]['_data'],
self.db.docs.find_one({'_id': new_sharejs_uuid})['_data']
)
assert_equal(
len([item for item in self.example_ops if item['name'] == self.sharejs_uuid]),
len([item for item in self.db.docs_ops.find({'name': new_sharejs_uuid})])
)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_migrate_uuid_no_mongo(self, mock_sharejs):
# Case where no edits have been made to the wiki
wname = 'bar.baz'
wkey = to_mongo_key(wname)
share_uuid = generate_private_uuid(self.project, wname)
sharejs_uuid = get_sharejs_uuid(self.project, wname)
self.project.update_node_wiki(wname, 'Hello world', Auth(self.user))
migrate_uuid(self.project, wname)
assert_not_equal(share_uuid, self.project.wiki_private_uuids.get(wkey))
assert_is_none(self.db.docs.find_one({'_id': sharejs_uuid}))
assert_is_none(self.db.docs_ops.find_one({'name': sharejs_uuid}))
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_migrate_uuid_updates_node(self, mock_sharejs):
migrate_uuid(self.project, self.wname)
assert_not_equal(self.private_uuid, self.project.wiki_private_uuids[self.wkey])
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_manage_contributors_updates_uuid(self, mock_sharejs):
user = UserFactory()
self.project.add_contributor(
contributor=user,
permissions=['read', 'write', 'admin'],
auth=Auth(user=self.user),
)
self.project.save()
assert_equal(self.private_uuid, self.project.wiki_private_uuids[self.wkey])
# Removing admin permission does nothing
self.project.manage_contributors(
user_dicts=[
{'id': user._id, 'permission': 'write', 'visible': True},
{'id': self.user._id, 'permission': 'admin', 'visible': True},
],
auth=Auth(user=self.user),
save=True,
)
assert_equal(self.private_uuid, self.project.wiki_private_uuids[self.wkey])
# Removing write permission migrates uuid
self.project.manage_contributors(
user_dicts=[
{'id': user._id, 'permission': 'read', 'visible': True},
{'id': self.user._id, 'permission': 'admin', 'visible': True},
],
auth=Auth(user=self.user),
save=True,
)
assert_not_equal(self.private_uuid, self.project.wiki_private_uuids[self.wkey])
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_delete_share_doc(self, mock_sharejs):
delete_share_doc(self.project, self.wname)
assert_is_none(self.db.docs.find_one({'_id': self.sharejs_uuid}))
assert_is_none(self.db.docs_ops.find_one({'name': self.sharejs_uuid}))
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_delete_share_doc_updates_node(self, mock_sharejs):
assert_equal(self.private_uuid, self.project.wiki_private_uuids[self.wkey])
delete_share_doc(self.project, self.wname)
assert_not_in(self.wkey, self.project.wiki_private_uuids)
def test_get_draft(self):
# draft is current with latest wiki save
current_content = self.wiki_page.get_draft(self.project)
assert_equals(current_content, self.wiki_page.content)
# modify the sharejs wiki page contents and ensure we
# return the draft contents
new_content = 'I am a teapot'
new_time = int(time.time() * 1000) + 10000
new_version = self.example_docs[0]['_v'] + 1
self.db.docs.update(
{'_id': self.sharejs_uuid},
{'$set': {
'_v': new_version,
'_m.mtime': new_time,
'_data': new_content
}}
)
current_content = self.wiki_page.get_draft(self.project)
assert_equals(current_content, new_content)
def tearDown(self):
super(TestWikiShareJSMongo, self).tearDown()
self.db.drop_collection('docs')
self.db.drop_collection('docs_ops')
@classmethod
def tearDownClass(cls):
share_db().connection.drop_database(settings.SHAREJS_DB_NAME)
settings.SHAREJS_DB_NAME = cls._original_sharejs_db_name # restore the value saved in setUpClass
class TestWikiUtils(OsfTestCase):
def setUp(self):
super(TestWikiUtils, self).setUp()
self.project = ProjectFactory()
def test_get_sharejs_uuid(self):
wname = 'foo.bar'
wname2 = 'bar.baz'
private_uuid = generate_private_uuid(self.project, wname)
sharejs_uuid = get_sharejs_uuid(self.project, wname)
# Provides consistent results
assert_equal(sharejs_uuid, get_sharejs_uuid(self.project, wname))
# Provides obfuscation
assert_not_in(wname, sharejs_uuid)
assert_not_in(sharejs_uuid, wname)
assert_not_in(private_uuid, sharejs_uuid)
assert_not_in(sharejs_uuid, private_uuid)
# Differs based on share uuid provided
assert_not_equal(sharejs_uuid, get_sharejs_uuid(self.project, wname2))
# Differs across projects and forks
project = ProjectFactory()
assert_not_equal(sharejs_uuid, get_sharejs_uuid(project, wname))
fork = self.project.fork_node(Auth(self.project.creator))
assert_not_equal(sharejs_uuid, get_sharejs_uuid(fork, wname))
def test_generate_share_uuid(self):
wname = 'bar.baz'
wkey = to_mongo_key(wname)
assert_is_none(self.project.wiki_private_uuids.get(wkey))
share_uuid = generate_private_uuid(self.project, wname)
self.project.reload()
assert_equal(self.project.wiki_private_uuids[wkey], share_uuid)
new_uuid = generate_private_uuid(self.project, wname)
self.project.reload()
assert_not_equal(share_uuid, new_uuid)
assert_equal(self.project.wiki_private_uuids[wkey], new_uuid)
def test_format_wiki_version(self):
assert_is_none(format_wiki_version(None, 5, False))
assert_is_none(format_wiki_version('', 5, False))
assert_equal(format_wiki_version('3', 5, False), 3)
assert_equal(format_wiki_version('4', 5, False), 'previous')
assert_equal(format_wiki_version('5', 5, False), 'current')
assert_equal(format_wiki_version('previous', 5, False), 'previous')
assert_equal(format_wiki_version('current', 5, False), 'current')
assert_equal(format_wiki_version('preview', 5, True), 'preview')
assert_equal(format_wiki_version('current', 0, False), 'current')
assert_equal(format_wiki_version('preview', 0, True), 'preview')
with assert_raises(InvalidVersionError):
format_wiki_version('1', 0, False)
with assert_raises(InvalidVersionError):
format_wiki_version('previous', 0, False)
with assert_raises(InvalidVersionError):
format_wiki_version('6', 5, False)
with assert_raises(InvalidVersionError):
format_wiki_version('0', 5, False)
with assert_raises(InvalidVersionError):
format_wiki_version('preview', 5, False)
with assert_raises(InvalidVersionError):
format_wiki_version('nonsense', 5, True)
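    # In summary (an illustrative reading of the assertions above): numeric
    # versions map to themselves except the two most recent, which become
    # 'previous' and 'current'; 'preview' is only accepted when the final
    # boolean argument (presumably an allow-preview flag) is True; anything
    # out of range or unrecognized raises InvalidVersionError.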
class TestPublicWiki(OsfTestCase):
def setUp(self):
super(TestPublicWiki, self).setUp()
self.project = ProjectFactory()
self.consolidate_auth = Auth(user=self.project.creator)
self.user = AuthUserFactory()
def test_addon_on_children(self):
parent = ProjectFactory()
node = NodeFactory(parent=parent, category='project')
sub_component = NodeFactory(parent=node)
parent.delete_addon('wiki', self.consolidate_auth)
node.delete_addon('wiki', self.consolidate_auth)
sub_component.delete_addon('wiki', self.consolidate_auth)
NodeFactory(parent=node)
        has_addon_on_child_node = node.has_addon_on_children('wiki')
assert_true(has_addon_on_child_node)
def test_check_user_has_addon_excludes_deleted_components(self):
parent = ProjectFactory()
parent.delete_addon('wiki', self.consolidate_auth)
node = NodeFactory(parent=parent, category='project')
node.delete_addon('wiki', self.consolidate_auth)
sub_component = NodeFactory(parent=node)
sub_component.is_deleted = True
sub_component.save()
        has_addon_on_child_node = node.has_addon_on_children('wiki')
assert_false(has_addon_on_child_node)
def test_set_editing(self):
parent = ProjectFactory()
node = NodeFactory(parent=parent, category='project', is_public=True)
wiki = node.get_addon('wiki')
# Set as publicly editable
wiki.set_editing(permissions=True, auth=self.consolidate_auth, log=True)
assert_true(wiki.is_publicly_editable)
assert_equal(node.logs.latest().action, 'made_wiki_public')
# Try to set public when the wiki is already public
with assert_raises(NodeStateError):
wiki.set_editing(permissions=True, auth=self.consolidate_auth, log=False)
# Turn off public editing
wiki.set_editing(permissions=False, auth=self.consolidate_auth, log=True)
assert_false(wiki.is_publicly_editable)
assert_equal(node.logs.latest().action, 'made_wiki_private')
node = NodeFactory(parent=parent, category='project')
wiki = node.get_addon('wiki')
# Try to set to private wiki already private
with assert_raises(NodeStateError):
wiki.set_editing(permissions=False, auth=self.consolidate_auth, log=False)
# Try to set public when the project is private
with assert_raises(NodeStateError):
wiki.set_editing(permissions=True, auth=self.consolidate_auth, log=False)
def test_serialize_wiki_settings(self):
node = NodeFactory(parent=self.project, creator=self.user, is_public=True)
node.get_addon('wiki').set_editing(
permissions=True, auth=self.consolidate_auth, log=True)
data = serialize_wiki_settings(self.user, [node])
expected = [{
'node': {
'id': node._id,
'title': node.title,
'url': node.url,
},
'children': [
{
'select': {
'title': 'permission',
'permission': 'public'
},
}
],
'kind': 'folder',
'nodeType': 'component',
'category': 'hypothesis',
'permissions': {'view': True}
}]
assert_equal(data, expected)
    def test_serialize_wiki_settings_pointer(self):
node = NodeFactory(parent=self.project, creator=self.user, is_public=True)
node.get_addon('wiki').set_editing(
permissions=True, auth=self.consolidate_auth, log=True)
node.add_pointer(self.project, Auth(self.user))
node.save()
data = serialize_wiki_settings(self.user, [node])
expected = [{
'node': {
'id': node._id,
'title': node.title,
'url': node.url,
},
'children': [
{
'select': {
'title': 'permission',
'permission': 'public'
},
}
],
'kind': 'folder',
'nodeType': 'component',
'category': 'hypothesis',
'permissions': {'view': True}
}]
assert_equal(data, expected)
def test_serialize_wiki_settings_no_wiki(self):
node = NodeFactory(parent=self.project, creator=self.user)
node.delete_addon('wiki', self.consolidate_auth)
data = serialize_wiki_settings(self.user, [node])
expected = []
assert_equal(data, expected)
class TestWikiMenu(OsfTestCase):
def setUp(self):
super(TestWikiMenu, self).setUp()
self.user = UserFactory()
self.project = ProjectFactory(creator=self.user, is_public=True)
self.component = NodeFactory(creator=self.user, parent=self.project, is_public=True)
self.consolidate_auth = Auth(user=self.project.creator)
self.non_contributor = UserFactory()
def test_format_home_wiki_page_no_content(self):
data = views.format_home_wiki_page(self.project)
expected = {
'page': {
'url': self.project.web_url_for('project_wiki_home'),
'name': 'Home',
'id': 'None',
}
}
assert_equal(data, expected)
def test_format_project_wiki_pages_contributor(self):
self.project.update_node_wiki('home', 'content here', self.consolidate_auth)
self.project.update_node_wiki('zoo', 'koala', self.consolidate_auth)
home_page = self.project.get_wiki_page(name='home')
zoo_page = self.project.get_wiki_page(name='zoo')
data = views.format_project_wiki_pages(self.project, self.consolidate_auth)
expected = [
{
'page': {
'url': self.project.web_url_for('project_wiki_view', wname='home', _guid=True),
'name': 'Home',
'id': home_page._primary_key,
}
},
{
'page': {
'url': self.project.web_url_for('project_wiki_view', wname='zoo', _guid=True),
'name': 'zoo',
'id': zoo_page._primary_key,
}
}
]
assert_equal(data, expected)
def test_format_project_wiki_pages_no_content_non_contributor(self):
self.project.update_node_wiki('home', 'content here', self.consolidate_auth)
self.project.update_node_wiki('zoo', '', self.consolidate_auth)
home_page = self.project.get_wiki_page(name='home')
data = views.format_project_wiki_pages(self.project, auth=Auth(self.non_contributor))
expected = [
{
'page': {
'url': self.project.web_url_for('project_wiki_view', wname='home', _guid=True),
'name': 'Home',
'id': home_page._primary_key,
}
}
]
assert_equal(data, expected)
def test_format_component_wiki_pages_contributor(self):
self.component.update_node_wiki('home', 'home content', self.consolidate_auth)
self.component.update_node_wiki('zoo', 'koala', self.consolidate_auth)
zoo_page = self.component.get_wiki_page(name='zoo')
expected = [
{
'page': {
'name': self.component.title,
'url': self.component.web_url_for('project_wiki_view', wname='home', _guid=True),
},
'children': [
{
'page': {
'url': self.component.web_url_for('project_wiki_view', wname='home', _guid=True),
'name': 'Home',
'id': self.component._primary_key,
}
},
{
'page': {
'url': self.component.web_url_for('project_wiki_view', wname='zoo', _guid=True),
'name': 'zoo',
'id': zoo_page._primary_key,
},
}
],
'kind': 'component',
'category': self.component.category,
'pointer': False,
}
]
data = views.format_component_wiki_pages(node=self.project, auth=self.consolidate_auth)
assert_equal(data, expected)
def test_format_component_wiki_pages_no_content_non_contributor(self):
data = views.format_component_wiki_pages(node=self.project, auth=Auth(self.non_contributor))
expected = []
assert_equal(data, expected)
def test_project_wiki_grid_data(self):
self.project.update_node_wiki('home', 'project content', self.consolidate_auth)
self.component.update_node_wiki('home', 'component content', self.consolidate_auth)
data = views.project_wiki_grid_data(auth=self.consolidate_auth, wname='home', node=self.project)
expected = [
{
'title': 'Project Wiki Pages',
'kind': 'folder',
'type': 'heading',
'children': views.format_project_wiki_pages(node=self.project, auth=self.consolidate_auth),
},
{
'title': 'Component Wiki Pages',
'kind': 'folder',
'type': 'heading',
'children': views.format_component_wiki_pages(node=self.project, auth=self.consolidate_auth)
}
]
assert_equal(data, expected)
|
monikagrabowska/osf.io
|
addons/wiki/tests/test_wiki.py
|
Python
|
apache-2.0
| 60,428
|
[
"VisIt"
] |
a2f09e1091de497bebe1284b8d95c4611cb3fb1ecc8f426cbff7b2339f82b4c5
|
"""
This is used to test the ElasticSearchDB module, and to discover any incompatible changes in the Elasticsearch API.
If you modify the test data, you have to update the test cases accordingly.
"""
import unittest
import sys
import datetime
import time
from DIRAC import gLogger
from DIRAC.Core.Utilities.ElasticSearchDB import ElasticSearchDB
from DIRAC.Core.Utilities.ElasticSearchDB import generateFullIndexName
elHost = 'localhost'
elPort = 9200
class ElasticTestCase(unittest.TestCase):
""" Test of ElasticSearchDB class, using local instance
"""
def __init__(self, *args, **kwargs):
super(ElasticTestCase, self).__init__(*args, **kwargs)
self.data = [{"Color": "red", "quantity": 1, "Product": "a", "timestamp": "2015-02-09 09:00:00.0"},
{"Color": "red", "quantity": 1, "Product": "b", "timestamp": "2015-02-09 16:15:00.0"},
{"Color": "red", "quantity": 1, "Product": "b", "timestamp": "2015-02-09 16:30:00.0"},
{"Color": "red", "quantity": 1, "Product": "a", "timestamp": "2015-02-09 09:00:00.0"},
{"Color": "red", "quantity": 1, "Product": "a", "timestamp": "2015-02-09 09:15:00.0"},
{"Color": "red", "quantity": 2, "Product": "b", "timestamp": "2015-02-09 16:15:00.0"},
{"Color": "red", "quantity": 1, "Product": "a", "timestamp": "2015-02-09 09:15:00.0"},
{"Color": "red", "quantity": 2, "Product": "b", "timestamp": "2015-02-09 16:15:00.0"},
{"Color": "red", "quantity": 1, "Product": "a", "timestamp": "2015-02-09 09:15:00.0"},
{"Color": "red", "quantity": 2, "Product": "b", "timestamp": "2015-02-09 16:15:00.0"}]
self.moreData = [{"Color": "red", "quantity": 1, "Product": "a", "timestamp": "2015-02-09 09:00:00.0"},
{"Color": "red", "quantity": 1, "Product": "b", "timestamp": "2015-02-09 09:15:00.0"},
{"Color": "red", "quantity": 1, "Product": "c", "timestamp": "2015-02-09 09:30:00.0"},
{"Color": "red", "quantity": 1, "Product": "d", "timestamp": "2015-02-09 10:00:00.0"},
{"Color": "red", "quantity": 1, "Product": "e", "timestamp": "2015-02-09 10:15:00.0"},
{"Color": "red", "quantity": 1, "Product": "f", "timestamp": "2015-02-09 10:30:00.0"},
{"Color": "red", "quantity": 1, "Product": "g", "timestamp": "2015-02-09 10:45:00.0"},
{"Color": "red", "quantity": 1, "Product": "h", "timestamp": "2015-02-09 11:00:00.0"},
{"Color": "red", "quantity": 1, "Product": "i", "timestamp": "2015-02-09 11:15:00.0"},
{"Color": "red", "quantity": 1, "Product": "l", "timestamp": "2015-02-09 11:30:00.0"}]
self.index_name = ''
self.maxDiff = None
def setUp(self):
gLogger.setLevel('DEBUG')
self.elasticSearchDB = ElasticSearchDB(host=elHost,
port=elPort,
useSSL=False)
def tearDown(self):
pass
class ElasticBulkCreateChain(ElasticTestCase):
""" Chain for creating indices
"""
def test_bulkindex(self):
""" bulk_index test
"""
result = self.elasticSearchDB.bulk_index('integrationtest',
'test',
self.data)
self.assertTrue(result['OK'])
self.assertEqual(result['Value'], 10)
time.sleep(5)
indices = self.elasticSearchDB.getIndexes()
self.assertEqual(type(indices), list)
for index in indices:
res = self.elasticSearchDB.deleteIndex(index)
self.assertTrue(res['OK'])
def test_bulkindexMonthly(self):
""" bulk_index test (month)
"""
    result = self.elasticSearchDB.bulk_index(indexprefix='integrationtestmonthly',
doc_type='test',
data=self.data,
period='month')
self.assertTrue(result['OK'])
self.assertEqual(result['Value'], 10)
time.sleep(5)
indices = self.elasticSearchDB.getIndexes()
self.assertEqual(type(indices), list)
for index in indices:
res = self.elasticSearchDB.deleteIndex(index)
self.assertTrue(res['OK'])
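# Illustrative note (not part of the original suite): a successful bulk_index
# call returns a DIRAC-style result dict such as {'OK': True, 'Value': 10},
# where 'Value' is the number of documents inserted, as asserted above.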
class ElasticCreateChain(ElasticTestCase):
""" 2 simple tests on index creation and deletion
"""
def tearDown(self):
self.elasticSearchDB.deleteIndex(self.index_name)
def test_index(self):
""" create index test
"""
result = self.elasticSearchDB.createIndex('integrationtest', {})
self.assertTrue(result['OK'])
self.index_name = result['Value']
for i in self.data:
result = self.elasticSearchDB.index(self.index_name, 'test', i)
self.assertTrue(result['OK'])
def test_wrongdataindex(self):
""" create index test (wrong insertion)
"""
result = self.elasticSearchDB.createIndex('dsh63tsdgad', {})
self.assertTrue(result['OK'])
index_name = result['Value']
result = self.elasticSearchDB.index(index_name, 'test', {"Color": "red",
"quantity": 1,
"Product": "a",
"timestamp": 1458226213})
self.assertTrue(result['OK'])
result = self.elasticSearchDB.index(index_name, 'test', {"Color": "red",
"quantity": 1,
"Product": "a",
"timestamp": "2015-02-09T16:15:00Z"})
self.assertFalse(result['OK'])
self.assertTrue(result['Message'])
result = self.elasticSearchDB.deleteIndex(index_name)
self.assertTrue(result['OK'])
class ElasticDeleteChain(ElasticTestCase):
""" deletion tests
"""
def test_deleteNonExistingIndex(self):
""" delete non-existing index
"""
result = self.elasticSearchDB.deleteIndex('dsdssuu')
self.assertFalse(result['OK'])
self.assertTrue(result['Message'])
class ElasticTestChain(ElasticTestCase):
""" various tests chained
"""
def setUp(self):
self.elasticSearchDB = ElasticSearchDB(host=elHost,
port=elPort,
useSSL=False)
result = generateFullIndexName('integrationtest')
self.assertTrue(len(result) > len('integrationtest'))
self.index_name = result
result = self.elasticSearchDB.index(self.index_name, 'test', {"Color": "red",
"quantity": 1,
"Product": "a",
"timestamp": 1458226213})
self.assertTrue(result['OK'])
def tearDown(self):
self.elasticSearchDB.deleteIndex(self.index_name)
def test_getIndexes(self):
""" test fail if no indices are present
"""
self.elasticSearchDB.deleteIndex(self.index_name)
result = self.elasticSearchDB.getIndexes()
self.assertFalse(result) # it will be empty at this point
def test_getDocTypes(self):
""" test get document types
"""
result = self.elasticSearchDB.getDocTypes(self.index_name)
self.assertTrue(result)
    self.assertEqual(sorted(result['Value']['test']['properties'].keys()),
                     [u'Color', u'Product', u'quantity', u'timestamp'])
def test_exists(self):
result = self.elasticSearchDB.exists(self.index_name)
self.assertTrue(result)
def test_generateFullIndexName(self):
indexName = 'test'
today = datetime.datetime.today().strftime("%Y-%m-%d")
expected = "%s-%s" % (indexName, today)
result = generateFullIndexName(indexName)
self.assertEqual(result, expected)
def test_generateFullIndexName2(self):
indexName = 'test'
month = datetime.datetime.today().strftime("%Y-%m")
expected = "%s-%s" % (indexName, month)
result = generateFullIndexName(indexName, 'month')
self.assertEqual(result, expected)
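  # Illustrative sketch (not from the original suite): generateFullIndexName
  # appends the current date to an arbitrary prefix, e.g. on 2015-02-09:
  #   generateFullIndexName('mydata')          -> 'mydata-2015-02-09'
  #   generateFullIndexName('mydata', 'month') -> 'mydata-2015-02'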
def test_getUniqueValue(self):
result = self.elasticSearchDB.getUniqueValue(self.index_name, 'quantity')
    self.assertTrue(result['OK'])
# this, and the next (Product) are not run because (possibly only for ES 6+):
# # 'Fielddata is disabled on text fields by default.
# # Set fielddata=true on [Color] in order to load fielddata in memory by uninverting the inverted index.
# # Note that this can however use significant memory. Alternatively use a keyword field instead.'
# result = self.elasticSearchDB.getUniqueValue(self.index_name, 'Color', )
# self.assertTrue(result['OK'])
# self.assertEqual(result['Value'], [])
# result = self.elasticSearchDB.getUniqueValue(self.index_name, 'Product')
# self.assertTrue(result['OK'])
# self.assertEqual(result['Value'], [])
def test_querySimple(self):
""" simple query test
"""
self.elasticSearchDB.deleteIndex(self.index_name)
# inserting 10 entries
for i in self.moreData:
result = self.elasticSearchDB.index(self.index_name, 'test', i)
self.assertTrue(result['OK'])
time.sleep(10) # giving ES some time for indexing
# this query returns everything, so we are expecting 10 hits
body = {
'query': {
'match_all': {}
}
}
result = self.elasticSearchDB.query(self.index_name, body)
self.assertTrue(result['OK'])
self.assertTrue(isinstance(result['Value'], dict))
self.assertEqual(len(result['Value']['hits']['hits']), 10)
# this query returns nothing
body = {
'query': {
'match_none': {}
}
}
result = self.elasticSearchDB.query(self.index_name, body)
self.assertTrue(result['OK'])
self.assertTrue(isinstance(result['Value'], dict))
self.assertEqual(result['Value']['hits']['hits'], [])
# this is a wrong query
body = {
'pippo': {
'bool': {
'must': [],
'filter': []
}
}
}
result = self.elasticSearchDB.query(self.index_name, body)
self.assertFalse(result['OK'])
# this query should also return everything
body = {
'query': {
'bool': {
'must': [],
'filter': []
}
}
}
result = self.elasticSearchDB.query(self.index_name, body)
self.assertTrue(result['OK'])
self.assertTrue(isinstance(result['Value'], dict))
self.assertEqual(len(result['Value']['hits']['hits']), 10)
# def test_query(self):
# body = {"size": 0,
# {"query": {"query_string": {"query": "*"}},
# "filter": {"bool":
# {"must": [{"range":
# {"timestamp":
# {"gte": 1423399451544,
# "lte": 1423631917911
# }
# }
# }],
# "must_not": []
# }
# }
# }
# },
# "aggs": {
# "3": {
# "date_histogram": {
# "field": "timestamp",
# "interval": "3600000ms",
# "min_doc_count": 1,
# "extended_bounds": {
# "min": 1423399451544,
# "max": 1423631917911
# }
# },
# "aggs": {
# "4": {
# "terms": {
# "field": "Product",
# "size": 0,
# "order": {
# "1": "desc"
# }
# },
# "aggs": {
# "1": {
# "sum": {
# "field": "quantity"
# }
# }
# }
# }
# }
# }
# }
# }
# result = self.elasticSearchDB.query(self.index_name, body)
# self.assertEqual(result['aggregations'],
# {u'3': {u'buckets': [{u'4': {u'buckets': [{u'1': {u'value': 5.0},
# u'key': u'a',
# u'doc_count': 5}],
# u'sum_other_doc_count': 0,
# u'doc_count_error_upper_bound': 0},
# u'key': 1423468800000,
# u'doc_count': 5},
# {u'4': {u'buckets': [{u'1': {u'value': 8.0},
# u'key': u'b',
# u'doc_count': 5}],
# u'sum_other_doc_count': 0,
# u'doc_count_error_upper_bound': 0},
# u'key': 1423494000000,
# u'doc_count': 5}]}})
# FIXME: "filtered" is discontinued since ES 5.0
  # def test_queryMonthly(self):
# body = {"size": 0,
# "query": {"filtered": {"query": {"query_string": {"query": "*"}},
# "filter": {"bool": {"must": [{"range": {
# "timestamp": {
# "gte": 1423399451544,
# "lte": 1423631917911
# }
# }
# }],
# "must_not": []
# }
# }
# }
# },
# "aggs": {
# "3": {
# "date_histogram": {
# "field": "timestamp",
# "interval": "3600000ms",
# "min_doc_count": 1,
# "extended_bounds": {
# "min": 1423399451544,
# "max": 1423631917911
# }
# },
# "aggs": {
# "4": {
# "terms": {
# "field": "Product",
# "size": 0,
# "order": {
# "1": "desc"
# }
# },
# "aggs": {
# "1": {
# "sum": {
# "field": "quantity"
# }
# }
# }
# }
# }
# }
# }
# }
  # result = self.elasticSearchDB.query('integrationtestmonthly*', body)
# self.assertEqual(result['aggregations'],
# {u'3': {u'buckets': [{u'4': {u'buckets': [{u'1': {u'value': 5.0},
# u'key': u'a',
# u'doc_count': 5}],
# u'sum_other_doc_count': 0,
# u'doc_count_error_upper_bound': 0},
# u'key': 1423468800000,
# u'doc_count': 5},
# {u'4': {u'buckets': [{u'1': {u'value': 8.0},
# u'key': u'b',
# u'doc_count': 5}],
# u'sum_other_doc_count': 0,
# u'doc_count_error_upper_bound': 0},
# u'key': 1423494000000,
# u'doc_count': 5}]}})
def test_Search(self):
self.elasticSearchDB.deleteIndex(self.index_name)
# inserting 10 entries
for i in self.moreData:
result = self.elasticSearchDB.index(self.index_name, 'test', i)
self.assertTrue(result['OK'])
time.sleep(10) # giving ES some time for indexing
s = self.elasticSearchDB._Search(self.index_name)
result = s.execute()
self.assertEqual(len(result.hits), 10)
self.assertEqual(dir(result.hits[0]), [u'Color', u'Product', 'meta', u'quantity', u'timestamp'])
q = self.elasticSearchDB._Q('range', timestamp={'lte': 1423501337292, 'gte': 1423497057518})
s = self.elasticSearchDB._Search(self.index_name)
s = s.filter('bool', must=q)
query = s.to_dict()
self.assertEqual(query, {'query': {'bool': {'filter': [
{'bool': {'must': [{'range': {'timestamp': {'gte': 1423497057518, 'lte': 1423501337292}}}]}}]}}})
result = s.execute()
self.assertEqual(len(result.hits), 0)
q = self.elasticSearchDB._Q('range', timestamp={'lte': 1423631917911, 'gte': 1423399451544})
s = self.elasticSearchDB._Search(self.index_name)
s = s.filter('bool', must=q)
query = s.to_dict()
self.assertEqual(query, {'query': {'bool': {'filter': [
{'bool': {'must': [{'range': {'timestamp': {'gte': 1423399451544, 'lte': 1423631917911}}}]}}]}}})
result = s.execute()
self.assertEqual(len(result.hits), 0)
# q = [
# self.elasticSearchDB._Q(
# 'range',
# timestamp={
# 'lte': 1423631917911,
# 'gte': 1423399451544}),
# self.elasticSearchDB._Q(
# 'match',
# Product='a')]
# s = self.elasticSearchDB._Search(self.index_name)
# s = s.filter('bool', must=q)
# query = s.to_dict()
# self.assertEqual(query, {'query': {'bool': {'filter': [{'bool': {
# 'must': [{'range': {'timestamp': {'gte': 1423399451544, 'lte': 1423631917911}}},
# {'match': {'Product': 'a'}}]}}]}}})
# result = s.execute()
# self.assertEqual(len(result.hits), 5)
# self.assertEqual(result.hits[0].Product, 'a')
# self.assertEqual(result.hits[4].Product, 'a')
# def test_A1(self):
# q = [self.elasticSearchDB._Q('range', timestamp={'lte': 1423631917911, 'gte': 1423399451544})]
# s = self.elasticSearchDB._Search(self.index_name)
# s = s.filter('bool', must=q)
# a1 = self.elasticSearchDB._A('terms', field='Product', size=0)
# s.aggs.bucket('2', a1)
# query = s.to_dict()
# self.assertEqual(query, {'query': {'bool': {'filter': [{'bool': {'must': [{'range': {'timestamp': {
# 'gte': 1423399451544, 'lte': 1423631917911}}}]}}]}},
# 'aggs': {'2': {'terms': {'field': 'Product', 'size': 0}}}})
# result = s.execute()
# self.assertEqual(result.aggregations['2'].buckets, [
# {u'key': u'a', u'doc_count': 5}, {u'key': u'b', u'doc_count': 5}])
# def test_A2(self):
# q = [self.elasticSearchDB._Q('range', timestamp={'lte': 1423631917911, 'gte': 1423399451544})]
# s = self.elasticSearchDB._Search(self.index_name)
# s = s.filter('bool', must=q)
# a1 = self.elasticSearchDB._A('terms', field='Product', size=0)
# a1.metric('total_quantity', 'sum', field='quantity')
# s.aggs.bucket('2', a1)
# query = s.to_dict()
# self.assertEqual(
# query, {
# 'query': {
# 'bool': {
# 'filter': [
# {
# 'bool': {
# 'must': [
# {
# 'range': {
# 'timestamp': {
# 'gte': 1423399451544, 'lte': 1423631917911}}}]}}]}}, 'aggs': {
# '2': {
# 'terms': {
# 'field': 'Product', 'size': 0}, 'aggs': {
# 'total_quantity': {
# 'sum': {
# 'field': 'quantity'}}}}}})
# result = s.execute()
# self.assertEqual(result.aggregations['2'].buckets,
# [{u'total_quantity': {u'value': 5.0}, u'key': u'a', u'doc_count': 5}, {
# u'total_quantity': {u'value': 8.0}, u'key': u'b', u'doc_count': 5}])
# def test_piplineaggregation(self):
# q = [self.elasticSearchDB._Q('range', timestamp={'lte': 1423631917911, 'gte': 1423399451544})]
# s = self.elasticSearchDB._Search(self.index_name)
# s = s.filter('bool', must=q)
# a1 = self.elasticSearchDB._A('terms', field='Product', size=0)
# a2 = self.elasticSearchDB._A('terms', field='timestamp')
# a2.metric('total_quantity', 'sum', field='quantity')
# a1.bucket(
# 'end_data',
# 'date_histogram',
# field='timestamp',
# interval='3600000ms').metric(
# 'tt',
# a2).pipeline(
# 'avg_buckets',
# 'avg_bucket',
# buckets_path='tt>total_quantity',
# gap_policy='insert_zeros')
# s.aggs.bucket('2', a1)
# query = s.to_dict()
# self.assertEqual(
# query, {
# 'query': {
# 'bool': {
# 'filter': [
# {
# 'bool': {
# 'must': [
# {
# 'range': {
# 'timestamp': {
# 'gte': 1423399451544, 'lte': 1423631917911}}}]}}]}}, 'aggs': {
# '2': {
# 'terms': {
# 'field': 'Product', 'size': 0}, 'aggs': {
# 'end_data': {
# 'date_histogram': {
# 'field': 'timestamp', 'interval': '3600000ms'}, 'aggs': {
# 'tt': {
# 'terms': {
# 'field': 'timestamp'}, 'aggs': {
# 'total_quantity': {
# 'sum': {
# 'field': 'quantity'}}}}, 'avg_buckets': {
# 'avg_bucket': {
# 'buckets_path': 'tt>total_quantity', 'gap_policy': 'insert_zeros'}}}}}}}})
# result = s.execute()
# self.assertEqual(len(result.aggregations['2'].buckets), 2)
# self.assertEqual(result.aggregations['2'].buckets[0].key, u'a')
# self.assertEqual(result.aggregations['2'].buckets[1].key, u'b')
# self.assertEqual(result.aggregations['2'].buckets[0]['end_data'].buckets[0].avg_buckets, {u'value': 2.5})
# self.assertEqual(result.aggregations['2'].buckets[1]['end_data'].buckets[0].avg_buckets, {u'value': 4})
if __name__ == '__main__':
testSuite = unittest.defaultTestLoader.loadTestsFromTestCase(ElasticTestCase)
testSuite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(ElasticCreateChain))
testSuite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(ElasticBulkCreateChain))
testSuite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(ElasticTestChain))
testSuite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(ElasticDeleteChain))
testResult = unittest.TextTestRunner(verbosity=2).run(testSuite)
sys.exit(not testResult.wasSuccessful())
|
andresailer/DIRAC
|
tests/Integration/Core/Test_ElasticsearchDB.py
|
Python
|
gpl-3.0
| 24,576
|
[
"DIRAC"
] |
74718920b2f6f4920e02759a69b83e8bb237f331493685d2e6dd0fcd3f58a145
|
from numpy import (logical_and, asarray, pi, zeros_like,
piecewise, array, arctan2, tan, zeros, arange, floor)
from numpy.core.umath import (sqrt, exp, greater, less, cos, add, sin,
less_equal, greater_equal)
# From splinemodule.c
from .spline import cspline2d, sepfir2d
from scipy.special import comb, gamma
__all__ = ['spline_filter', 'bspline', 'gauss_spline', 'cubic', 'quadratic',
'cspline1d', 'qspline1d', 'cspline1d_eval', 'qspline1d_eval']
def factorial(n):
return gamma(n + 1)
def spline_filter(Iin, lmbda=5.0):
"""Smoothing spline (cubic) filtering of a rank-2 array.
Filter an input data set, `Iin`, using a (cubic) smoothing spline of
fall-off `lmbda`.
"""
intype = Iin.dtype.char
hcol = array([1.0, 4.0, 1.0], 'f') / 6.0
if intype in ['F', 'D']:
Iin = Iin.astype('F')
ckr = cspline2d(Iin.real, lmbda)
cki = cspline2d(Iin.imag, lmbda)
outr = sepfir2d(ckr, hcol, hcol)
outi = sepfir2d(cki, hcol, hcol)
out = (outr + 1j * outi).astype(intype)
elif intype in ['f', 'd']:
ckr = cspline2d(Iin, lmbda)
out = sepfir2d(ckr, hcol, hcol)
out = out.astype(intype)
else:
raise TypeError("Invalid data type for Iin")
return out
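# A minimal usage sketch (not part of the original module); the random input
# below is purely illustrative:
def _example_spline_filter():
    from numpy.random import rand
    image = rand(16, 16)          # rank-2 double-precision array
    return spline_filter(image, lmbda=5.0)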
_splinefunc_cache = {}
def _bspline_piecefunctions(order):
"""Returns the function defined over the left-side pieces for a bspline of
a given order.
The 0th piece is the first one less than 0. The last piece is a function
identical to 0 (returned as the constant 0). (There are order//2 + 2 total
pieces).
Also returns the condition functions that when evaluated return boolean
arrays for use with `numpy.piecewise`.
"""
try:
return _splinefunc_cache[order]
except KeyError:
pass
def condfuncgen(num, val1, val2):
if num == 0:
return lambda x: logical_and(less_equal(x, val1),
greater_equal(x, val2))
elif num == 2:
return lambda x: less_equal(x, val2)
else:
return lambda x: logical_and(less(x, val1),
greater_equal(x, val2))
last = order // 2 + 2
if order % 2:
startbound = -1.0
else:
startbound = -0.5
condfuncs = [condfuncgen(0, 0, startbound)]
bound = startbound
for num in range(1, last - 1):
condfuncs.append(condfuncgen(1, bound, bound - 1))
bound = bound - 1
condfuncs.append(condfuncgen(2, 0, -(order + 1) / 2.0))
# final value of bound is used in piecefuncgen below
# the functions to evaluate are taken from the left-hand side
# in the general expression derived from the central difference
# operator (because they involve fewer terms).
fval = factorial(order)
def piecefuncgen(num):
Mk = order // 2 - num
if (Mk < 0):
return 0 # final function is 0
coeffs = [(1 - 2 * (k % 2)) * float(comb(order + 1, k, exact=1)) / fval
for k in range(Mk + 1)]
shifts = [-bound - k for k in range(Mk + 1)]
def thefunc(x):
res = 0.0
for k in range(Mk + 1):
res += coeffs[k] * (x + shifts[k]) ** order
return res
return thefunc
funclist = [piecefuncgen(k) for k in range(last)]
_splinefunc_cache[order] = (funclist, condfuncs)
return funclist, condfuncs
def bspline(x, n):
"""B-spline basis function of order n.
Notes
-----
Uses numpy.piecewise and automatic function-generator.
"""
ax = -abs(asarray(x))
# number of pieces on the left-side is (n+1)/2
funclist, condfuncs = _bspline_piecefunctions(n)
condlist = [func(ax) for func in condfuncs]
return piecewise(ax, condlist, funclist)
def gauss_spline(x, n):
"""Gaussian approximation to B-spline basis function of order n.
Parameters
----------
n : int
The order of the spline. Must be nonnegative, i.e., n >= 0
References
----------
.. [1] Bouma H., Vilanova A., Bescos J.O., ter Haar Romeny B.M., Gerritsen
F.A. (2007) Fast and Accurate Gaussian Derivatives Based on B-Splines. In:
Sgallari F., Murli A., Paragios N. (eds) Scale Space and Variational
Methods in Computer Vision. SSVM 2007. Lecture Notes in Computer
Science, vol 4485. Springer, Berlin, Heidelberg
"""
signsq = (n + 1) / 12.0
return 1 / sqrt(2 * pi * signsq) * exp(-x ** 2 / 2 / signsq)
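# Worked check (illustrative, not from the original source): for n = 3 the
# approximation uses signsq = 4/12, so gauss_spline(0, 3) = sqrt(3/(2*pi))
# ~= 0.691, close to the exact cubic value cubic(0) = 2/3 ~= 0.667.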
def cubic(x):
"""A cubic B-spline.
This is a special case of `bspline`, and equivalent to ``bspline(x, 3)``.
"""
ax = abs(asarray(x))
res = zeros_like(ax)
cond1 = less(ax, 1)
if cond1.any():
ax1 = ax[cond1]
res[cond1] = 2.0 / 3 - 1.0 / 2 * ax1 ** 2 * (2 - ax1)
cond2 = ~cond1 & less(ax, 2)
if cond2.any():
ax2 = ax[cond2]
res[cond2] = 1.0 / 6 * (2 - ax2) ** 3
return res
def quadratic(x):
"""A quadratic B-spline.
This is a special case of `bspline`, and equivalent to ``bspline(x, 2)``.
"""
ax = abs(asarray(x))
res = zeros_like(ax)
cond1 = less(ax, 0.5)
if cond1.any():
ax1 = ax[cond1]
res[cond1] = 0.75 - ax1 ** 2
cond2 = ~cond1 & less(ax, 1.5)
if cond2.any():
ax2 = ax[cond2]
res[cond2] = (ax2 - 1.5) ** 2 / 2.0
return res
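# A small consistency sketch (not part of the original module): `cubic` and
# `quadratic` agree with the generic `bspline` evaluator, as their docstrings
# claim.
def _example_bspline_consistency():
    from numpy import allclose, linspace
    x = linspace(-3, 3, 61)
    assert allclose(bspline(x, 3), cubic(x))
    assert allclose(bspline(x, 2), quadratic(x))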
def _coeff_smooth(lam):
xi = 1 - 96 * lam + 24 * lam * sqrt(3 + 144 * lam)
omeg = arctan2(sqrt(144 * lam - 1), sqrt(xi))
rho = (24 * lam - 1 - sqrt(xi)) / (24 * lam)
rho = rho * sqrt((48 * lam + 24 * lam * sqrt(3 + 144 * lam)) / xi)
return rho, omeg
def _hc(k, cs, rho, omega):
return (cs / sin(omega) * (rho ** k) * sin(omega * (k + 1)) *
greater(k, -1))
def _hs(k, cs, rho, omega):
c0 = (cs * cs * (1 + rho * rho) / (1 - rho * rho) /
(1 - 2 * rho * rho * cos(2 * omega) + rho ** 4))
gamma = (1 - rho * rho) / (1 + rho * rho) / tan(omega)
ak = abs(k)
return c0 * rho ** ak * (cos(omega * ak) + gamma * sin(omega * ak))
def _cubic_smooth_coeff(signal, lamb):
rho, omega = _coeff_smooth(lamb)
cs = 1 - 2 * rho * cos(omega) + rho * rho
K = len(signal)
yp = zeros((K,), signal.dtype.char)
k = arange(K)
yp[0] = (_hc(0, cs, rho, omega) * signal[0] +
add.reduce(_hc(k + 1, cs, rho, omega) * signal))
yp[1] = (_hc(0, cs, rho, omega) * signal[0] +
_hc(1, cs, rho, omega) * signal[1] +
add.reduce(_hc(k + 2, cs, rho, omega) * signal))
for n in range(2, K):
yp[n] = (cs * signal[n] + 2 * rho * cos(omega) * yp[n - 1] -
rho * rho * yp[n - 2])
y = zeros((K,), signal.dtype.char)
y[K - 1] = add.reduce((_hs(k, cs, rho, omega) +
_hs(k + 1, cs, rho, omega)) * signal[::-1])
y[K - 2] = add.reduce((_hs(k - 1, cs, rho, omega) +
_hs(k + 2, cs, rho, omega)) * signal[::-1])
for n in range(K - 3, -1, -1):
y[n] = (cs * yp[n] + 2 * rho * cos(omega) * y[n + 1] -
rho * rho * y[n + 2])
return y
def _cubic_coeff(signal):
zi = -2 + sqrt(3)
K = len(signal)
yplus = zeros((K,), signal.dtype.char)
powers = zi ** arange(K)
yplus[0] = signal[0] + zi * add.reduce(powers * signal)
for k in range(1, K):
yplus[k] = signal[k] + zi * yplus[k - 1]
output = zeros((K,), signal.dtype)
output[K - 1] = zi / (zi - 1) * yplus[K - 1]
for k in range(K - 2, -1, -1):
output[k] = zi * (output[k + 1] - yplus[k])
return output * 6.0
def _quadratic_coeff(signal):
zi = -3 + 2 * sqrt(2.0)
K = len(signal)
yplus = zeros((K,), signal.dtype.char)
powers = zi ** arange(K)
yplus[0] = signal[0] + zi * add.reduce(powers * signal)
for k in range(1, K):
yplus[k] = signal[k] + zi * yplus[k - 1]
output = zeros((K,), signal.dtype.char)
output[K - 1] = zi / (zi - 1) * yplus[K - 1]
for k in range(K - 2, -1, -1):
output[k] = zi * (output[k + 1] - yplus[k])
return output * 8.0
def cspline1d(signal, lamb=0.0):
"""
Compute cubic spline coefficients for rank-1 array.
Find the cubic spline coefficients for a 1-D signal assuming
mirror-symmetric boundary conditions. To obtain the signal back from the
spline representation mirror-symmetric-convolve these coefficients with a
length 3 FIR window [1.0, 4.0, 1.0]/ 6.0 .
Parameters
----------
signal : ndarray
A rank-1 array representing samples of a signal.
lamb : float, optional
Smoothing coefficient, default is 0.0.
Returns
-------
c : ndarray
Cubic spline coefficients.
"""
if lamb != 0.0:
return _cubic_smooth_coeff(signal, lamb)
else:
return _cubic_coeff(signal)
def qspline1d(signal, lamb=0.0):
"""Compute quadratic spline coefficients for rank-1 array.
Find the quadratic spline coefficients for a 1-D signal assuming
mirror-symmetric boundary conditions. To obtain the signal back from the
spline representation mirror-symmetric-convolve these coefficients with a
length 3 FIR window [1.0, 6.0, 1.0]/ 8.0 .
Parameters
----------
signal : ndarray
A rank-1 array representing samples of a signal.
lamb : float, optional
Smoothing coefficient (must be zero for now).
Returns
-------
c : ndarray
        Quadratic spline coefficients.
"""
if lamb != 0.0:
raise ValueError("Smoothing quadratic splines not supported yet.")
else:
return _quadratic_coeff(signal)
def cspline1d_eval(cj, newx, dx=1.0, x0=0):
"""Evaluate a spline at the new set of points.
    `dx` is the old sample-spacing while `x0` was the old origin. In
    other words, the old sample points (knot points) for which the `cj`
    represent spline coefficients were at equally-spaced points of::
      oldx = x0 + j*dx,  j=0...N-1, with N=len(cj)
Edges are handled using mirror-symmetric boundary conditions.
"""
newx = (asarray(newx) - x0) / float(dx)
res = zeros_like(newx, dtype=cj.dtype)
if res.size == 0:
return res
N = len(cj)
cond1 = newx < 0
cond2 = newx > (N - 1)
cond3 = ~(cond1 | cond2)
# handle general mirror-symmetry
res[cond1] = cspline1d_eval(cj, -newx[cond1])
res[cond2] = cspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
newx = newx[cond3]
if newx.size == 0:
return res
result = zeros_like(newx, dtype=cj.dtype)
jlower = floor(newx - 2).astype(int) + 1
for i in range(4):
thisj = jlower + i
indj = thisj.clip(0, N - 1) # handle edge cases
result += cj[indj] * cubic(newx - thisj)
res[cond3] = result
return res
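# A minimal round-trip sketch (not part of the original module): away from the
# boundaries, evaluating the cubic-spline coefficients at the original knot
# positions reproduces the input samples (the evaluator effectively applies
# the [1, 4, 1]/6 window mentioned in the cspline1d docstring).
def _example_cspline1d_roundtrip():
    from numpy import allclose, arange, sin
    signal = sin(0.5 * arange(20))
    coeffs = cspline1d(signal)
    knots = arange(2, 18)
    assert allclose(cspline1d_eval(coeffs, knots), signal[knots])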
def qspline1d_eval(cj, newx, dx=1.0, x0=0):
"""Evaluate a quadratic spline at the new set of points.
    `dx` is the old sample-spacing while `x0` was the old origin. In
    other words, the old sample points (knot points) for which the `cj`
    represent spline coefficients were at equally-spaced points of::
      oldx = x0 + j*dx,  j=0...N-1, with N=len(cj)
Edges are handled using mirror-symmetric boundary conditions.
"""
newx = (asarray(newx) - x0) / dx
res = zeros_like(newx)
if res.size == 0:
return res
N = len(cj)
cond1 = newx < 0
cond2 = newx > (N - 1)
cond3 = ~(cond1 | cond2)
# handle general mirror-symmetry
res[cond1] = qspline1d_eval(cj, -newx[cond1])
res[cond2] = qspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
newx = newx[cond3]
if newx.size == 0:
return res
result = zeros_like(newx)
jlower = floor(newx - 1.5).astype(int) + 1
for i in range(3):
thisj = jlower + i
indj = thisj.clip(0, N - 1) # handle edge cases
result += cj[indj] * quadratic(newx - thisj)
res[cond3] = result
return res
|
aeklant/scipy
|
scipy/signal/bsplines.py
|
Python
|
bsd-3-clause
| 12,007
|
[
"Gaussian"
] |
de05aa2589356a8f8f74555a521d681838b49466b7290b512ca98a8df018ca51
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2008 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
""" Purchase quote wizard definition """
import datetime
from decimal import Decimal
import gtk
from kiwi.currency import currency
from kiwi.datatypes import ValidationError
from kiwi.python import Settable
from kiwi.ui.objectlist import Column
from stoqlib.api import api
from stoqlib.domain.payment.group import PaymentGroup
from stoqlib.domain.person import Branch
from stoqlib.domain.purchase import (PurchaseOrder, PurchaseItem, QuoteGroup,
Quotation)
from stoqlib.domain.sellable import Sellable
from stoqlib.domain.views import QuotationView
from stoqlib.gui.base.dialogs import run_dialog
from stoqlib.gui.base.lists import SimpleListDialog
from stoqlib.gui.base.wizards import (WizardEditorStep, BaseWizard,
BaseWizardStep)
from stoqlib.gui.dialogs.quotedialog import QuoteFillingDialog
from stoqlib.gui.editors.purchaseeditor import PurchaseQuoteItemEditor
from stoqlib.gui.search.searchcolumns import IdentifierColumn
from stoqlib.gui.search.searchfilters import DateSearchFilter
from stoqlib.gui.search.searchslave import SearchSlave
from stoqlib.gui.utils.printing import print_report
from stoqlib.gui.wizards.purchasewizard import (PurchaseItemStep,
PurchaseWizard)
from stoqlib.lib.dateutils import localtoday
from stoqlib.lib.message import info, yesno
from stoqlib.lib.parameters import sysparam
from stoqlib.lib.translation import stoqlib_gettext
from stoqlib.lib.formatters import format_quantity, get_formatted_cost
from stoqlib.reporting.purchase import PurchaseQuoteReport
_ = stoqlib_gettext
#
# Wizard Steps
#
class StartQuoteStep(WizardEditorStep):
gladefile = 'StartQuoteStep'
model_type = PurchaseOrder
proxy_widgets = ['open_date', 'quote_deadline', 'branch_combo', 'notes']
def __init__(self, wizard, previous, store, model):
WizardEditorStep.__init__(self, store, wizard, model, previous)
def _setup_widgets(self):
quote_group = str(self.wizard.quote_group.identifier)
self.quote_group.set_text(quote_group)
branches = Branch.get_active_branches(self.store)
self.branch_combo.prefill(api.for_person_combo(branches))
sync_mode = api.sysparam.get_bool('SYNCHRONIZED_MODE')
self.branch_combo.set_sensitive(not sync_mode)
self.notes.set_accepts_tab(False)
def post_init(self):
self.register_validate_function(self.wizard.refresh_next)
self.force_validation()
def next_step(self):
return QuoteItemStep(self.wizard, self, self.store, self.model)
#
# BaseEditorSlave
#
def setup_proxies(self):
self._setup_widgets()
self.add_proxy(self.model, StartQuoteStep.proxy_widgets)
#
# Kiwi Callbacks
#
def on_quote_deadline__validate(self, widget, date):
if date < localtoday().date():
return ValidationError(_(u"The quote deadline date must be set to "
"today or a future date"))
class QuoteItemStep(PurchaseItemStep):
item_editor = PurchaseQuoteItemEditor
def get_sellable_view_query(self):
query = Sellable.get_unblocked_sellables_query(self.store)
return self.sellable_view, query
def setup_slaves(self):
PurchaseItemStep.setup_slaves(self)
self.cost_label.hide()
self.cost.hide()
def get_order_item(self, sellable, cost, quantity, batch=None):
assert batch is None
item = self.model.add_item(sellable, quantity)
        # Since we are quoting products, the item should not have a
        # predefined cost. It will be filled in later, when the
        # supplier replies to our quoting request.
item.cost = currency(0)
return item
def get_columns(self):
return [
Column('sellable.description', title=_('Description'),
data_type=str, expand=True, searchable=True),
Column('quantity', title=_('Quantity'), data_type=float, width=90,
format_func=format_quantity),
Column('sellable.unit_description', title=_('Unit'), data_type=str,
width=70),
]
def _setup_summary(self):
# disables summary label for the quoting list
self.summary = False
#
# WizardStep
#
def validate(self, value):
PurchaseItemStep.validate(self, value)
can_quote = not self.model.get_items().is_empty()
self.wizard.refresh_next(value and can_quote)
def post_init(self):
PurchaseItemStep.post_init(self)
if not self.has_next_step():
self.wizard.enable_finish()
def has_next_step(self):
# if we are editing a quote, this is the first and last step
return not self.wizard.edit
def next_step(self):
return QuoteSupplierStep(self.wizard, self, self.store, self.model)
class QuoteSupplierStep(WizardEditorStep):
gladefile = 'QuoteSupplierStep'
model_type = PurchaseOrder
# Class attribute so we can test it easier
product_columns = [
Column('description', title=_(u'Product'), data_type=str,
expand=True)]
def __init__(self, wizard, previous, store, model):
WizardEditorStep.__init__(self, store, wizard, model, previous)
self._setup_widgets()
def _setup_widgets(self):
self.quoting_list.set_columns(self._get_columns())
self._populate_quoting_list()
        if len(self.quoting_list) == 0:
            info(_(u'No suppliers have been found for any of the selected '
                   'items.\nThis quote will be cancelled.'))
self.wizard.finish()
def _get_columns(self):
return [Column('selected', title=" ", data_type=bool, editable=True),
Column('supplier.person.name', title=_('Supplier'),
data_type=str, sorted=True, expand=True),
Column('products_per_supplier', title=_('Supplied/Total'),
data_type=str)]
def _update_widgets(self):
selected = self.quoting_list.get_selected()
self.print_button.set_sensitive(selected is not None)
self.view_products_button.set_sensitive(selected is not None)
def _populate_quoting_list(self):
# populate the quoting list by finding the suppliers based on the
# products list
quotes = {}
total_items = 0
# O(n*n)
for item in self.model.get_items():
total_items += 1
sellable = item.sellable
product = sellable.product
for supplier_info in product.suppliers:
supplier = supplier_info.supplier
if supplier is None:
continue
                if supplier not in quotes:
quotes[supplier] = [sellable]
else:
quotes[supplier].append(sellable)
for supplier, items in quotes.items():
total_supplier_items = len(items)
per_supplier = _(u"%s/%s") % (total_supplier_items, total_items)
self.quoting_list.append(Settable(supplier=supplier,
items=items,
products_per_supplier=per_supplier,
selected=True))
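        # e.g. (illustrative): if both items are supplied by S1 and only the
        # first one by S2, quotes == {S1: [s1, s2], S2: [s1]} and the rows
        # read "2/2" and "1/2" for products_per_supplier.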
def _print_quote(self):
selected = self.quoting_list.get_selected()
self.model.supplier = selected.supplier
print_report(PurchaseQuoteReport, self.model)
def _generate_quote(self, selected):
# we use our model as a template to create new quotes
quote = self.model.clone()
# we need to overwrite some values:
quote.group = PaymentGroup(store=self.store)
include_all = self.include_all_products.get_active()
for item in self.model.get_items():
if item.sellable in selected.items or include_all:
quote_item = item.clone()
quote_item.order = quote
quote.supplier = selected.supplier
self.wizard.quote_group.add_item(quote)
self.wizard.quote = quote
self.store.commit()
def _show_products(self):
selected = self.quoting_list.get_selected()
title = _(u'Products supplied by %s') % selected.supplier.person.name
run_dialog(SimpleListDialog, self.wizard, self.product_columns,
selected.items, title=title)
def _show_missing_products(self):
missing_products = set([i.sellable for i in self.model.get_items()])
for quote in self.quoting_list:
if quote.selected:
missing_products = missing_products.difference(quote.items)
if len(missing_products) == 0:
break
run_dialog(SimpleListDialog, self.wizard, self.product_columns,
missing_products, title=_(u'Missing Products'))
def _update_wizard(self):
# we need at least one supplier to finish this wizard
can_finish = any([i.selected for i in self.quoting_list])
self.wizard.refresh_next(can_finish)
#
# WizardStep hooks
#
def validate_step(self):
# I am using validate_step as a callback for the finish button
for item in self.quoting_list:
if item.selected:
self._generate_quote(item)
return True
def has_next_step(self):
return False
def post_init(self):
self.register_validate_function(self.wizard.refresh_next)
self.force_validation()
#
# Kiwi Callbacks
#
def on_print_button__clicked(self, widget):
self._print_quote()
def on_missing_products_button__clicked(self, widget):
self._show_missing_products()
def on_view_products_button__clicked(self, widget):
self._show_products()
def on_quoting_list__selection_changed(self, widget, item):
self._update_widgets()
def on_quoting_list__cell_edited(self, widget, item, cell):
self._update_wizard()
def on_quoting_list__row_activated(self, widget, item):
self._show_products()
class QuoteGroupSelectionStep(BaseWizardStep):
gladefile = 'QuoteGroupSelectionStep'
def __init__(self, wizard, store):
self._next_step = None
BaseWizardStep.__init__(self, store, wizard)
self._setup_slaves()
def _setup_slaves(self):
self.search = SearchSlave(self._get_columns(),
restore_name=self.__class__.__name__,
search_spec=QuotationView,
store=self.store)
self.attach_slave('search_group_holder', self.search)
self.search.set_text_field_columns(['supplier_name', 'identifier_str'])
filter = self.search.get_primary_filter()
filter.set_label(_(u'Supplier:'))
self.search.focus_search_entry()
self.search.results.connect('selection-changed',
self._on_searchlist__selection_changed)
self.search.results.connect('row-activated',
self._on_searchlist__row_activated)
date_filter = DateSearchFilter(_('Date:'))
self.search.add_filter(date_filter, columns=['open_date', 'deadline'])
self.edit_button.set_sensitive(False)
self.remove_button.set_sensitive(False)
def _get_columns(self):
return [IdentifierColumn('identifier', title=_("Quote #"), sorted=True),
IdentifierColumn('group_identifier', title=_('Group #')),
Column('supplier_name', title=_('Supplier'), data_type=str,
width=300),
Column('open_date', title=_('Open date'),
data_type=datetime.date),
Column('deadline', title=_('Deadline'),
data_type=datetime.date)]
def _can_purchase(self, item):
return item.cost > currency(0) and item.quantity > Decimal(0)
def _can_order(self, quotation):
if quotation is None:
return False
for item in quotation.purchase.get_items():
if not self._can_purchase(item):
return False
return True
def _update_view(self):
selected = self.search.results.get_selected()
has_selected = selected is not None
self.edit_button.set_sensitive(has_selected)
self.remove_button.set_sensitive(has_selected)
self.wizard.refresh_next(self._can_order(selected))
def _run_quote_editor(self):
store = api.new_store()
selected = store.fetch(self.search.results.get_selected().purchase)
retval = run_dialog(QuoteFillingDialog, self.wizard, selected, store)
store.confirm(retval)
store.close()
self._update_view()
def _remove_quote(self):
q = self.search.results.get_selected().quotation
        msg = _('Are you sure you want to remove "%s"?') % q.get_description()
if not yesno(msg, gtk.RESPONSE_NO,
_("Remove quote"), _("Don't remove")):
return
store = api.new_store()
group = store.fetch(q.group)
quote = store.fetch(q)
group.remove_item(quote)
# there is no reason to keep the group if there's no more quotes
if group.get_items().count() == 0:
store.remove(group)
store.confirm(True)
store.close()
self.search.refresh()
#
# WizardStep hooks
#
def next_step(self):
self.search.save_columns()
selected = self.search.results.get_selected()
if selected is None:
return
return QuoteGroupItemsSelectionStep(self.wizard, self.store,
selected.group, self)
#
# Callbacks
#
def _on_searchlist__selection_changed(self, widget, item):
self._update_view()
def _on_searchlist__row_activated(self, widget, item):
self._run_quote_editor()
def on_edit_button__clicked(self, widget):
self._run_quote_editor()
def on_remove_button__clicked(self, widget):
self._remove_quote()
class QuoteGroupItemsSelectionStep(BaseWizardStep):
gladefile = 'QuoteGroupItemsSelectionStep'
def __init__(self, wizard, store, group, previous=None):
self._group = group
self._next_step = None
BaseWizardStep.__init__(self, store, wizard, previous)
self._setup_widgets()
def _setup_widgets(self):
self.quoted_items.connect(
'selection-changed', self._on_quoted_items__selection_changed)
self.quoted_items.set_columns(self._get_columns())
# populate the list
for quote in self._group.get_items():
for purchase_item in quote.purchase.get_items():
if not self._can_purchase(purchase_item):
continue
sellable = purchase_item.sellable
ordered_qty = \
PurchaseItem.get_ordered_quantity(self.store, sellable)
self.quoted_items.append(Settable(
selected=True, order=quote.purchase, item=purchase_item,
description=sellable.get_description(),
supplier=quote.purchase.supplier_name,
quantity=purchase_item.quantity,
ordered_quantity=ordered_qty,
cost=purchase_item.cost))
def _get_columns(self):
return [Column('selected', title=" ", data_type=bool, editable=True),
Column('description', title=_('Description'), data_type=str,
expand=True, sorted=True),
Column('supplier', title=_('Supplier'), data_type=str,
expand=True),
Column('quantity', title=_(u'Quantity'), data_type=Decimal),
Column('ordered_quantity', title=_(u'Ordered'),
data_type=Decimal),
Column('cost', title=_(u'Cost'), data_type=currency,
format_func=get_formatted_cost)]
def _update_widgets(self):
if not self.quoted_items:
has_selected = False
else:
has_selected = any([q.selected for q in self.quoted_items])
self.create_order_button.set_sensitive(has_selected)
def _can_purchase(self, purchaseitem):
return (purchaseitem.cost > currency(0) and
purchaseitem.quantity > Decimal(0))
def _select_quotes(self, value):
for item in self.quoted_items:
item.selected = bool(value)
self.quoted_items.refresh()
self._update_widgets()
def _cancel_group(self):
msg = _("This will cancel the group and related quotes. "
"Are you sure?")
if not yesno(msg, gtk.RESPONSE_NO,
_("Cancel group"), _("Don't Cancel")):
return
store = api.new_store()
group = store.fetch(self._group)
group.cancel()
store.remove(group)
store.confirm(True)
store.close()
self.wizard.finish()
def _get_purchase_from_quote(self, quote, store):
quote_purchase = quote.purchase
real_order = quote_purchase.clone()
has_selected_items = False
# add selected items
for quoted_item in self.quoted_items:
order = store.fetch(quoted_item.order)
if order is quote_purchase and quoted_item.selected:
purchase_item = store.fetch(quoted_item.item).clone()
purchase_item.order = real_order
has_selected_items = True
# override some cloned data
real_order.group = PaymentGroup(store=store)
real_order.open_date = localtoday().date()
real_order.quote_deadline = None
real_order.status = PurchaseOrder.ORDER_PENDING
if has_selected_items:
return real_order
else:
store.remove(real_order)
def _close_quotes(self, quotes):
if not quotes:
return
if not yesno(_('Should we close the quotes used to compose the '
                       'purchase order?'),
gtk.RESPONSE_NO, _("Close quotes"), _("Don't close")):
return
store = api.new_store()
for q in quotes:
quotation = store.fetch(q)
quotation.close()
store.remove(quotation)
group = store.fetch(self._group)
if group.get_items().is_empty():
store.remove(group)
store.confirm(True)
store.close()
self.wizard.finish()
def _create_orders(self):
store = api.new_store()
group = store.fetch(self._group)
quotes = []
for quote in group.get_items():
purchase = self._get_purchase_from_quote(quote, store)
if not purchase:
continue
retval = run_dialog(PurchaseWizard, self.wizard, store, purchase)
store.confirm(retval)
# keep track of the quotes that might be closed
if retval:
quotes.append(quote)
store.close()
self._close_quotes(quotes)
#
# WizardStep
#
def post_init(self):
self.wizard.enable_finish()
self.wizard.next_button.set_label(gtk.STOCK_CLOSE)
def has_next_step(self):
return False
#
# Callbacks
#
def _on_quoted_items__selection_changed(self, widget, item):
self._update_widgets()
def on_select_all_button__clicked(self, widget):
self._select_quotes(True)
def on_unselect_all_button__clicked(self, widget):
self._select_quotes(False)
def on_cancel_group_button__clicked(self, widget):
self._cancel_group()
def on_create_order_button__clicked(self, widget):
self._create_orders()
#
# Main wizards
#
class QuotePurchaseWizard(BaseWizard):
size = (775, 400)
def __init__(self, store, model=None):
title = self._get_title(model)
self.edit = model is not None
self.quote = None
self.quote_group = self._get_or_create_quote_group(model, store)
model = model or self._create_model(store)
if model.status != PurchaseOrder.ORDER_QUOTING:
raise ValueError('Invalid order status. It should '
'be ORDER_QUOTING')
first_step = StartQuoteStep(self, None, store, model)
BaseWizard.__init__(self, store, first_step, model, title=title)
def _get_title(self, model=None):
if not model:
return _('New Quote')
return _('Edit Quote')
def _create_model(self, store):
supplier_id = sysparam.get_object_id('SUGGESTED_SUPPLIER')
branch = api.get_current_branch(store)
status = PurchaseOrder.ORDER_QUOTING
group = PaymentGroup(store=store)
return PurchaseOrder(supplier_id=supplier_id,
branch=branch, status=status,
expected_receival_date=None,
responsible=api.get_current_user(store),
group=group,
store=store)
def _get_or_create_quote_group(self, order, store):
if order is not None:
quotation = store.find(Quotation, purchase=order).one()
return quotation.group
else:
return QuoteGroup(branch=api.get_current_branch(store),
store=store)
def _delete_model(self):
if self.edit:
return
for item in self.model.get_items():
self.store.remove(item)
self.store.remove(self.model)
#
# WizardStep hooks
#
def finish(self):
self._delete_model()
self.retval = self.quote
self.close()
class ReceiveQuoteWizard(BaseWizard):
title = _("Receive Quote Wizard")
size = (750, 450)
def __init__(self, store):
self.model = None
first_step = QuoteGroupSelectionStep(self, store)
BaseWizard.__init__(self, store, first_step, self.model)
self.next_button.set_sensitive(False)
#
# WizardStep hooks
#
def finish(self):
self.retval = self.model
self.close()
|
tiagocardosos/stoq
|
stoqlib/gui/wizards/purchasequotewizard.py
|
Python
|
gpl-2.0
| 23,378
|
[
"VisIt"
] |
20032670f0fe45bd7d55c8197431bce9f9bdf4a78f6464a622fdf3b91b3481b1
|
from scipy import sparse
import scipy.sparse.linalg  # make sparse.linalg (spsolve, lsqr, inv) available below
from klampt import vectorops
import math
import numpy as np
from leastsq_bounds import leastsq_bounds
from controller import BaseController
from system_id import LinearSystemID
from online_leastsq import OnlineLeastSquares
def spdot(A, B):
"The same as np.dot(A, B), except it works even if A or B or both might be sparse."
if sparse.issparse(A) and sparse.issparse(B):
return A * B
elif sparse.issparse(A) and not sparse.issparse(B):
return (A * B).view(type=B.__class__)
elif not sparse.issparse(A) and sparse.issparse(B):
return (B.T * A.T).T.view(type=A.__class__)
else:
return np.dot(A, B)
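# A tiny usage sketch (not part of the original module): spdot transparently
# mixes sparse and dense operands.
def _example_spdot():
    A = sparse.eye(3, 3)            # sparse identity
    b = np.ones(3)                  # dense vector
    return spdot(A, b)              # same result as np.dot(np.eye(3), b)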
class MotionModel(object):
"""Members inputs and outputs are labels for the input type u and the
output type v"""
def __init__(self,inputs="torque",outputs="accel"):
self.inputs = inputs
self.outputs = outputs
def eval(self,q,dq,u):
"""Returns the output v = f(q,dq,u). For forward models, u=torque
and v=ddq, and for inverse models u=ddq and v=torque.
Inputs and outputs are assumed to be Numpy 1-D arrays."""
raise NotImplementedError()
def linearization(self,q,dq):
"""Returns a model (A,b) such that the output is approximated by
v = A*u+b. A can either be a Numpy 2D array or a Scipy sparse
matrix."""
raise NotImplementedError()
def getInverse(self):
"""Returns the inverted motion model for which evaluation produces
the inputs u that would produce output v: u = f^-1(q,dq,v). """
return DefaultInverseMotionModel(self)
class DefaultInverseMotionModel(MotionModel):
"""Basic inverse motion model that uses a linear equation solve to
evaluate the result."""
def __init__(self,forward):
self.forward = forward
MotionModel.__init__(self,forward.outputs,forward.inputs)
def eval(self,q,dq,u):
(A,b) = self.forward.linearization(q,dq)
if sparse.issparse(A):
try:
return sparse.linalg.spsolve(A,u-b)
except sparse.linalg.LinAlgError:
                return sparse.linalg.lsqr(A,u-b)[0]
else:
try:
return np.linalg.solve(A,u-b)
except np.linalg.LinAlgError:
return np.linalg.lstsq(A,u-b)[0]
def linearization(self,q,dq):
(A,b) = self.forward.linearization(q,dq)
if sparse.issparse(A):
Ainv = sparse.linalg.inv(A)
else:
try:
Ainv = np.linalg.inv(A)
except np.linalg.LinAlgError:
Ainv = np.linalg.pinv(A)
return (Ainv,-spdot(Ainv,b))
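# A toy sketch (not part of the original module): a constant linear forward
# model f(q,dq,u) = A*u + b, inverted via DefaultInverseMotionModel. The 2x2
# system and the 'torque'/'accel' labels below are purely illustrative.
def _example_inverse_motion_model():
    class _ToyLinearModel(MotionModel):
        def __init__(self):
            MotionModel.__init__(self,'torque','accel')
            self.A = np.array([[2.0, 0.0], [0.0, 4.0]])
            self.b = np.array([1.0, -1.0])
        def eval(self,q,dq,u):
            return np.dot(self.A, u) + self.b
        def linearization(self,q,dq):
            return (self.A, self.b)
    fwd = _ToyLinearModel()
    inv = fwd.getInverse()
    v = np.array([5.0, 7.0])
    u = inv.eval(None, None, v)   # solves A*u + b = v
    assert np.allclose(fwd.eval(None, None, u), v)
    return u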
class NaiveMotionModel(MotionModel):
"""An identity motion model that sets v = u"""
def __init__(self,inputs='torque',outputs='accel'):
MotionModel.__init__(self,inputs,outputs)
def eval(self,q,dq,u):
return u
def linearization(self,q,dq):
return (sparse.eye(len(dq),len(dq)),np.zeros((len(dq),)))
    def getInverse(self):
        return NaiveMotionModel(self.outputs,self.inputs)
class CompositeMotionModel(MotionModel):
"""Returns f(q,dq,g(q,dq,u))"""
def __init__(self,f,g):
self.f = f
self.g = g
MotionModel.__init__(self,self.g.inputs,self.f.outputs)
def eval(self,q,dq,u):
        return self.f.eval(q,dq,self.g.eval(q,dq,u))
def linearization(self,q,dq):
Af,bf = self.f.linearization(q,dq)
Ag,bg = self.g.linearization(q,dq)
return (spdot(Af,Ag),bf+spdot(Af,bg))
class IntegratorMotionModel(MotionModel):
"""A motion model that integrates the output of another motion model
by dt"""
def __init__(self,derivativeModel,dt):
self.derivativeModel = derivativeModel
self.dt = dt
outname = "integrated_"+derivativeModel.outputs
if derivativeModel.outputs=='accel':
outname = 'velocity'
elif derivativeModel.outputs=='velocity':
outname = 'position'
MotionModel.__init__(self,derivativeModel.inputs,outname)
def eval(self,q,dq,u):
        if self.derivativeModel.outputs=='accel':
            return dq + self.derivativeModel.eval(q,dq,u)*self.dt
        elif self.derivativeModel.outputs=='velocity':
            return q + self.derivativeModel.eval(q,dq,u)*self.dt
else:
raise NotImplementedError()
    def linearization(self,q,dq):
        (A,b) = self.derivativeModel.linearization(q,dq)
        if self.derivativeModel.outputs=='accel':
            return (A*self.dt,dq+b*self.dt)
        elif self.derivativeModel.outputs=='velocity':
            return (A*self.dt,q+b*self.dt)
else:
raise NotImplementedError()
class FreeBaseRobotMotionModel(MotionModel):
"""A relatively naive motion model with a free base and some links
in contact. The constraint equations are solved for in a least-squares
sense. Assumes the first 6 dofs are the virtual links of the free base.
Set the linksInContact member to a list of indices of the links in contact.
If the constraintWeights member is set, it is assumed to be a
list of 6*len(linksInContact) weights where each block of 6 consecutive
numbers weights the translation and rotation components of the
fixed-link constraint, respectively. (TODO: weights not done yet)
"""
def __init__(self,inname,outname,robot,linksInContact=[]):
self.robot = robot
self.linksInContact = linksInContact
self.constraintWeights = None
MotionModel.__init__(self,inname,outname)
def eval(self,q,dq,u):
if len(self.linksInContact)==0:
return np.hstack((np.zeros(6),u[6:]))
else:
A,b = self.linearization(q,dq)
return spdot(A,u)+b
def linearization(self,q,dq):
self.robot.setConfig(q)
#free-dof constrained motion model:
b = np.zeros(len(dq))
Aa = sparse.eye(len(dq)-6,len(dq)-6)
Af = sparse.csr_matrix((len(dq),6))
if len(self.linksInContact)==0:
Aaf = sparse.csr_matrix((6,len(dq)-6))
else:
#solve for first 6 rows of A by keeping foot constraints fixed
Jfs = []
Jas = []
for link in self.linksInContact:
Jl = np.array(self.robot.getLink(link).getJacobian((0,0,0)))
Jfs.append(Jl[:,0:6])
Jas.append(Jl[:,6:])
Jf = np.vstack(Jfs)
Ja = np.vstack(Jas)
#solve Jf * dqf + Ja * dqa = 0 in the least squares sense
Jfinv = np.linalg.pinv(Jf)
Aaf = -np.dot(Jfinv,Ja)
        #return the block matrix
        #      [    | Aaf ]
        #      [ Af |-----]
        #      [    | Aa  ]
return sparse.hstack([Af,sparse.vstack([Aaf,Aa])]),b
class RobotDynamicsMotionModel(MotionModel):
"""The basic Langrangian motion model, suitable for fully actuated robots.
B(q)*ddq + C(q,dq) + G(q) = u + fext.
By default performs gravity calculation (fext = -G(q)) but can also take
other forces and loads."""
def __init__(self,robotModel,gravity=(0,0,-9.8)):
self.robotModel = robotModel
self.gravity = gravity
MotionModel.__init__(self)
def externalForces(self,q,dq):
"""Subclasses can override this to generate other external forces besides
gravity. Assumes the robot model is updated"""
return -np.array(self.robotModel.getGravityForces(self.gravity))
def eval(self,q,dq,u):
self.robotModel.setConfig(q.tolist())
self.robotModel.setVelocity(dq.tolist())
        f = self.externalForces(q,dq)
return self.robotModel.accelFromTorques((u+f).tolist())
def linearization(self,q,dq):
self.robotModel.setConfig(q.tolist())
self.robotModel.setVelocity(dq.tolist())
        f = self.externalForces(q,dq)
C = np.array(self.robotModel.getCoriolisForces())
minv = np.array(self.robotModel.getMassMatrixInv())
return (minv,np.dot(minv,C-f))
def getInverse(self):
return RobotInverseDynamicsMotionModel(self)
class RobotInverseDynamicsMotionModel(MotionModel):
def __init__(self,forward):
self.forward = forward
MotionModel.__init__(self,forward.outputs,forward.inputs)
def eval(self,q,dq,ddq):
robotModel = self.forward.robotModel
robotModel.setConfig(q.tolist())
robotModel.setVelocity(dq.tolist())
        f = self.forward.externalForces(q,dq)
return np.array(robotModel.torquesFromAccel(ddq.tolist()))-f
def linearization(self,q,dq):
robotModel = self.forward.robotModel
robotModel.setConfig(q.tolist())
robotModel.setVelocity(dq.tolist())
        f = self.forward.externalForces(q,dq)
C = np.array(robotModel.getCoriolisForces())
m = np.array(robotModel.getMassMatrix())
return (m,C-f)
class AdaptiveMotionModel(MotionModel):
"""A motion model that performs adaptive estimation.
By default it estimates each joint independently using a linear second
order system: A[i]*(q[i],dq[i])+b[i]*(dqcmd[i])+c[i], i=1,...,n.
"""
def __init__(self,inname,outname):
MotionModel.__init__(self,inname,outname)
#TODO: other outputs besides velocity
assert (outname == 'velocity')
assert (inname == 'velocity')
self.sysids = None
def init(self,n,dt=0):
self.sysids = [LinearSystemID(2,1) for i in xrange(n)]
        for i,s in enumerate(self.sysids):
#default motion model: basic integrator of velocity cmds
s.setModelPrior(np.array([[1,dt],[0,0]]),np.array([[0],[1]]),np.zeros(2),10)
def eval(self,q,dq,u):
if self.sysids == None:
self.init(len(q))
outindex = 1
v = np.array([s.getOutput([qi,dqi],[ui])[outindex] for (s,qi,dqi,ui) in zip(self.sysids,q,dq,u)])
return v
def linearization(self,q,dq):
if self.sysids == None:
self.init(len(q))
outindex = 1
A = sparse.lil_matrix((len(q),len(q)))
b = np.zeros(len(q))
for i,s in enumerate(self.sysids):
As,Bs,Cs = s.getModel()
A[i,i] = Bs[outindex,0]
b[i] = np.dot(As[outindex,:],[q[i],dq[i]])+Cs[outindex]
return (A,b)
def add(self,q,dq,u,qnext,dqnext):
n= len(q)
if self.sysids == None:
self.init(n)
for i in xrange(n):
#TODO: add discount as a parameter?
self.sysids[i].discount(0.1,'hyperbolic')
self.sysids[i].add([q[i],dq[i]],[u[i]],[qnext[i],dqnext[i]])
return
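# Hypothetical usage sketch for AdaptiveMotionModel (not in the original
# file): feed it observed (state, command, next state) samples, then query
# the adapted prediction. It relies on the LinearSystemID backend imported
# above behaving as it is used elsewhere in this module.
def _adaptive_example():
    model = AdaptiveMotionModel('velocity','velocity')
    q = np.zeros(3)
    dq = np.zeros(3)
    u = np.ones(3)*0.1
    for step in xrange(20):
        qnext = q + u*0.01   # pretend the robot tracks the command with dt=0.01
        dqnext = u
        model.add(q,dq,u,qnext,dqnext)
        q,dq = qnext,dqnext
    return model.eval(q,dq,u)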
class GravityCompensationAdaptiveMotionModel(MotionModel):
"""A motion model that performs adaptive estimation with a gravity
compensation term.
It estimates each joint independently using a linear system
dq[i] = c1[i]*dqcmd[i]+cg[i]*G[i] + c0[i], i=1,...,n.
where dqcmd is the velocity command and G is the gravity torques
"""
def __init__(self,robot,inname,outname):
MotionModel.__init__(self,inname,outname)
#TODO: other outputs besides velocity
assert (outname == 'velocity')
assert (inname == 'velocity')
self.robot = robot
self.gravity = (0,0,-9.8)
self.estimators = None
def init(self,n,dt=0):
self.estimators = [OnlineLeastSquares(4) for i in xrange(n)]
for i,e in enumerate(self.estimators):
            #default motion model: dq = 0.8*dq + 0.3*dqcmd
            e.setPrior([0.8,0.3,0,0],1)
def eval(self,q,dq,u):
if self.estimators == None:
self.init(len(q))
self.robot.setConfig(q)
G = self.robot.getGravityForces(self.gravity)
        v = np.array([np.dot(e.x,[dqi,ui,Gi,1.0]) for (e,dqi,ui,Gi) in zip(self.estimators,dq,u,G)])
return v
def linearization(self,q,dq):
if self.estimators == None:
self.init(len(q))
A = sparse.lil_matrix((len(q),len(q)))
b = np.zeros(len(q))
self.robot.setConfig(q)
G = self.robot.getGravityForces(self.gravity)
for i,e in enumerate(self.estimators):
cdq,cu,cG,c0 = e.x
sdq,su,sG,s0 = e.solutionStandardErrors()
A[i,i] = cu
b[i] = cdq*dq[i]+cG*G[i]+c0
sA = su
sb = math.sqrt((sdq*dq[i])**2+(sG*G[i])**2 + s0**2)
if i==30:
print "Linearization",A[i,i],b[i]
print "Standard errors",sA,sb
return (A,b)
def add(self,q,dq,u,qnext,dqnext):
n= len(q)
if self.estimators == None:
self.init(n)
self.robot.setConfig(q)
G = self.robot.getGravityForces(self.gravity)
for i,e in enumerate(self.estimators):
#TODO: add discount as a parameter?
x = [dq[i],u[i],G[i],1.0]
if i==30:
print "Reading",x,dqnext[i]
print "Old coeffs",i,e.x
print "Old residual",np.dot(e.x,x)-dqnext[i]
e.discount(0.1,'hyperbolic')
e.add(x,dqnext[i])
if i==30:
print "Coeffs",i,e.x
print "Residual",np.dot(e.x,x)-dqnext[i]
#print e.AtA
#print e.AtAinv
#print e.Atb
return
class FreeBaseAdaptiveMotionModel(AdaptiveMotionModel):
"""A motion model that performs adaptive estimation for a free-base
robot. It estimates all joints independently, and estimates the effect
of all joint velocities on the base velocity.
"""
def __init__(self,inname,outname,relevantDofs=None,robot=None):
AdaptiveMotionModel.__init__(self,inname,outname)
self.baseSysID = None
self.relevantDofs = relevantDofs
        self.robot = robot
def init(self,n,dt=0):
AdaptiveMotionModel.init(self,n,dt)
#baseSysID takes base rotations, joint velocities, joint commands, and constant offset
numJoints = len(self.relevantDofs) if self.relevantDofs != None else n-6
self.baseSysID = [OnlineLeastSquares(3+numJoints+numJoints+1) for i in xrange(6)]
def getQDofs(self,q):
return q[3:6]
def getDqDofs(self,dq):
if self.relevantDofs == None:
return dq[6:]
else:
return [dq[i] for i in self.relevantDofs]
def getUDofs(self,u):
if self.relevantDofs == None:
return u[6:]
else:
return [u[i] for i in self.relevantDofs]
def eval(self,q,dq,u):
v = AdaptiveMotionModel.eval(self,q,dq,u)
n = len(q)
xbase = np.hstack((self.getQDofs(q),self.getDqDofs(dq),self.getUDofs(u),[1.0]))
for i in xrange(6):
v[i] = self.baseSysID[i].x.dot(xbase)
return v
def linearization(self,q,dq):
A,b = AdaptiveMotionModel.linearization(self,q,dq)
#now fill in top right corner of A, first 6 rows of b
n = len(q)
numJoints = len(self.relevantDofs) if self.relevantDofs != None else n-6
for i in xrange(6):
A[i,i]=0
coeffs = self.baseSysID[i].x
#unpack
qCoeffs = coeffs[:3]
dqCoeffs = coeffs[3:3+numJoints]
uCoeffs = coeffs[3+numJoints:3+numJoints+numJoints]
constCoeff = coeffs[3+numJoints+numJoints]
if self.relevantDofs == None:
A[i,6:n] = uCoeffs
else:
for cd,d in zip(uCoeffs,self.relevantDofs):
A[i,d] = cd
b[i] = np.dot(dqCoeffs,self.getDqDofs(dq))+np.dot(qCoeffs,self.getQDofs(q))+constCoeff
#print uCoeffs,b[i]
return A,b
def add(self,q,dq,u,qnext,dqnext):
AdaptiveMotionModel.add(self,q,dq,u,qnext,dqnext)
n = len(q)
xbase = np.hstack((self.getQDofs(q),self.getDqDofs(dq),self.getUDofs(u),[1.0]))
for i in xrange(6):
if self.baseSysID[i].count > 10:
self.baseSysID[i].discount(0.1,'hyperbolic')
self.baseSysID[i].add(xbase,dq[i])
return
def clamp(x,a,b):
return a if x < a else (b if x > b else x)
class BoundedMotionModel(MotionModel):
"""A Langrangian motion model for underactuated robots. Forward dynamics
takes into account torque limits by capping inputs to their limits."""
def __init__(self,model,umin,umax):
self.model = model
self.umin,self.umax = umin,umax
self.boundsweight = 100
MotionModel.__init__(self,model.inputs,model.outputs)
def clampToLimits(self,x):
"""Returns a copy of x but clamped by the torque limits."""
res = type(x)([clamp(xi,tmini,tmaxi) for tmini,tmaxi,xi in zip(self.umin,self.umax,x)])
return res
def inLimits(self,x):
return all([tmini <= xi <= tmaxi for tmini,tmaxi,xi in zip(self.umin,self.umax,x)])
def eval(self,q,dq,u):
        return self.model.eval(q,dq,self.clampToLimits(u))
"""
def inverseDynamics(self,q,dq,ddq):
# min_u ||ddq - accel(u)||^2 s.t. umin <= u <= umax
u0 = self.model.inverseDynamics(q,dq,ddq)
if self.inLimits(u0): return u0
# least squares problem
(A,b) = self.model.accelLinearization(q,dq)
return leastsq_bounds(lambda(u):spdot(A,u)+b-ddq,
u0,
zip(self.umin,self.umax),
Dfun=lambda(u):A,boundsweight=self.boundsweight)
"""
class ConstrainedMotionModel:
"""A Langrangian motion model for constrained / underactuated robots.
Freedofs are constrained to have zero torque. Inverse
dynamics are given by solving an equality-constrained least squares problem. """
def __init__(self,model,freeDofs):
self.model = model
self.freeDofs = freeDofs
self.actuatedDofs = [i for i in range(model.robotModel.numLinks()) if i not in freeDofs]
def constraintEquation(self,q,dq):
"""Returns (C,d) for constraint C*ddq = d"""
raise NotImplementedError()
    def inverseDynamics(self,q,dq,ddq):
        # least squares problem min ||A*u+b-ddq||^2 s.t. u[freedofs] = 0, C*(A*u+b)=d
        (A,b) = self.model.accelLinearization(q,dq)
        (C,d) = self.constraintEquation(q,dq)
        Aa = np.column_stack([A[:,i] for i in self.actuatedDofs])
        # on the actuated dofs this reduces to
        #   min ||Aa*ua+b-ddq||^2  s.t.  (C*Aa)*ua = d - C*b
        # solved here via its KKT system (a sketch; this method was left
        # unfinished in the original source)
        CA = np.dot(C,Aa)
        k = CA.shape[0]
        KKT = np.vstack([np.hstack([np.dot(Aa.T,Aa),CA.T]),
                         np.hstack([CA,np.zeros((k,k))])])
        rhs = np.hstack([np.dot(Aa.T,ddq-b),d-np.dot(C,b)])
        ua = np.linalg.lstsq(KKT,rhs)[0][:Aa.shape[1]]
        u = np.zeros(A.shape[1])
        u[self.actuatedDofs] = ua
        return u
#fixed-feet motion model
#If contact forces f are applied such that the constraint C(q)=0 is maintained, we need J(q)*q'' = 0
#J q'' = J B^-1*(t+J^T*f-C-G) = 0
#J B^-1*J^T*f = - J B^-1*(t-C-G)
#f = -(J B^-1*J^T)^-1 J B^-1*(t-C-G)
#q'' = B^-1*(t-C-G-J^T*(J B^-1*J^T)^-1 J B^-1*(t-C-G))
# = (B^-1 - B^-1 J^T*(J B^-1*J^T)^-1 J B^-1)*(t-C-G)
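# A direct transcription of the derivation above (a hedged sketch; nothing in
# this file calls it): given a symmetric mass matrix B, bias torques h = C+G,
# contact Jacobian J, and applied torque t, the constrained acceleration is
#   ddq = (B^-1 - B^-1 J^T (J B^-1 J^T)^-1 J B^-1) * (t - h)
def _fixed_feet_accel(B, J, t, h):
    Binv = np.linalg.inv(B)
    JB = np.dot(J, Binv)                      # J B^-1
    JBJt = np.dot(JB, J.T)                    # J B^-1 J^T
    # JB.T equals B^-1 J^T because B (and hence B^-1) is symmetric
    P = Binv - np.dot(JB.T, np.dot(np.linalg.pinv(JBJt), JB))
    return np.dot(P, t - h)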
class DebugMotionModelController(BaseController):
def __init__(self,model,robot=None,dofs=None):
self.model = model
self.robot = robot
        self.activeDofs = dofs
assert (model.inputs=="velocity" and model.outputs=="velocity"),"Can only debug velocity models at the moment"
self.dqpredlast = None
def output_and_advance(self,**inputs):
try:
q = inputs['q']
dq = inputs['dq']
u = vectorops.div(vectorops.sub(inputs['qcmd'],q),inputs['dt'])
except KeyError:
print "Warning, cannot debug motion model, dq or dqcmd not in input"
return None
if self.dqpredlast != None:
if self.activeDofs != None:
dq = dq[:]
for i in [i for i in range(len(q)) if i not in self.activeDofs]:
dq[i] = self.dqpredlast[i]
#compare motion model to dq
print "Motion model error:",np.linalg.norm(self.dqpredlast - np.array(dq))
(v,i) = max(zip(np.abs(self.dqpredlast - np.array(dq)).tolist(),range(len(dq))))
print " Max error:",v,"at",i,
if self.robot!=None: print self.robot.getLink(i).getName()
else: print
print " Command:",self.ulast[i],"Predicted:",self.dqpredlast[i],"Actual:",dq[i]
print " pred:",self.Alast[i,i],"*u +",self.blast[i]
#print " Predicted:",self.dqpredlast
#print " Actual:",dq
A,b = self.model.linearization(q,dq)
self.dqpredlast = A.dot(u)+b
self.ulast = u
self.Alast,self.blast = A,b
return None
|
stevekuznetsov/Klampt
|
Python/control/MotionModel.py
|
Python
|
bsd-3-clause
| 20,440
|
[
"BLAST"
] |
a03cf577e908838b1917f5f2813b651ad3b8d800fa6d441b281787dabfab044f
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 24 13:30:42 2013
$Id: transects.py 11076 2014-08-28 17:52:41Z heijer $
$Date: 2014-08-28 19:52:41 +0200 (Thu, 28 Aug 2014) $
$Author: heijer $
$Revision: 11076 $
$HeadURL: https://svn.oss.deltares.nl/repos/openearthtools/trunk/python/applications/jarkus_transects/jarkus/transects.py $
@author: heijer
"""
import logging
logger = logging.getLogger(__name__)
from netCDF4 import Dataset,num2date
import numpy as np
from numpy import asarray
from scipy.interpolate import interp1d
from threading import Lock
# shapely is not thread safe, so implement a lock
shapelock = Lock()
class Transects:
"""
Wrapper for JARKUS transects
"""
def __init__(self, *args, **kwargs):
"""
instantiate the environment
"""
if 'url' in kwargs:
self.url = kwargs.pop('url')
else:
self.url = 'http://opendap.tudelft.nl/thredds/dodsC/data2/deltares/rijkswaterstaat/jarkus/profiles/transect.nc'
try:
self.ds = Dataset(self.url)
        except OSError as e:
            err = '%s. "%s" not found.' % (e, self.url)
            logger.error(err)
            raise IOError(err)
self.dims = self.ds.dimensions
# initiate filter
self.filter = dict.fromkeys(self.dims.keys())
self.reset_filter()
self.set_filter(**kwargs)
def reset_filter(self, *args):
"""
remove filter for all dimensions (default) or for the specified dimensions only
"""
if args == ():
args = self.dims.keys()
for k in args:
self.filter[k] = np.ones((self.dims[k].__len__(),)) == 1
def set_filter(self, **kwargs):
"""
set filter by one or more keyword arguments
filters can be specified as boolean (shape must match the dimension's shape), as indices or as variable specification.
"""
for k,v in kwargs.items():
isdim = k in self.ds.dimensions.keys()
isvar = k in self.ds.variables.keys()
            if isinstance(v, np.ndarray) and v.dtype == bool and len(v) == len(self.dims[k]):
self.filter[k] = np.logical_and(self.filter[k], v)
elif isinstance(v, (int, np.integer)) and k in self.dims and np.all(np.abs(np.asarray(v)) < self.dims[k].__len__()):
self.filter[k] = np.ones((self.dims[k].__len__(),)) == 0
self.filter[k][v] = True
elif k == 'year':
self.filter['time'] = self.year2idx(v)
elif isvar and not isdim:
dimname = self.ds.variables[k].dimensions[0]
self.filter[dimname] = np.logical_and(self.filter[dimname], np.in1d(self.ds.variables[k][:], np.asarray(v)))
def get_filter(self, key):
"""
returns filter for specified key
"""
return self.filter[key]
def __exit__(self):
"""
close NetCDF file
"""
self.close()
def close(self):
"""
close NetCDF file
"""
self.ds.close()
def get_data(self, varname):
"""
returns data for specified variable and applies available filters
"""
return self.ds.variables[varname][[self.filter[k] for k in self.ds.variables[varname].dimensions]]
def areaname2areacode(self, areaname):
"""
        returns areacode for a specified areaname as input.
\nToDo: include in another class of the same package "jarkus_transects", eventually.
"""
# areas according to RWS definition
areas = {"Schiermonnikoog":2,"Ameland":3,"Terschelling":4,"Vlieland":5,
"Texel":6,"Noord-Holland":7,"Rijnland":8,"Delfland":9,
"Maasvlakte":10,"Voorne":11,"Goeree":12,"Schouwen":13,
"Noord-Beveland":15,"Walcheren":16,"Zeeuws-Vlaanderen":17}
        if isinstance(areaname, str):
            return areas.get(areaname)
        if isinstance(areaname, list):
            return list(map(areas.get, areaname))
def time2year(self, t):
"""
convert time to year
"""
time = self.ds.variables['time']
        if isinstance(t, (int, np.integer)):
return num2date(t, time.units).year
else:
return np.asarray([y.year for y in np.asarray(num2date(t, time.units))])
def year2idx(self, year):
"""
returns boolean index array to be applied to the time dimension
"""
#time = self.ds.variables['time']
#years = [y.year for y in num2date(time, time.units)]
years = self.time2year(self.ds.variables['time'][:])
if not year:
year = years
idx = np.in1d(years, np.asarray(year))
return idx
def cross_shore2xyRD(self, cs, transect_id, axis=None):
"""
returns RD coordinates (epsg 28992) for cross-shore coordinate(s) (wrt to RSP)
"""
cs = np.asarray(cs)
transect_id = np.asarray(transect_id)
aidx = np.in1d(self.ds.variables['id'], transect_id)
cs_f = np.array((self.ds.variables['cross_shore'][0], self.ds.variables['cross_shore'][-1]))
x_f = np.array((self.ds.variables['x'][aidx,0], self.ds.variables['x'][aidx,-1]))
y_f = np.array((self.ds.variables['y'][aidx,0], self.ds.variables['y'][aidx,-1]))
px = np.polyfit(cs_f, x_f, 1)
py = np.polyfit(cs_f, y_f, 1)
x = np.polyval(px, cs)
y = np.polyval(py, cs)
return x,y
def initcc(self):
"""
initialize coordinate conversion
"""
if not hasattr(self, 'rd2latlon'):
from osgeo.osr import SpatialReference, CoordinateTransformation
# Define the Rijksdriehoek projection system (EPSG 28992)
epsg28992 = SpatialReference()
epsg28992.ImportFromEPSG(28992)
# correct the towgs84
epsg28992.SetTOWGS84(565.237,50.0087,465.658,-0.406857,0.350733,-1.87035,4.0812)
# Define the wgs84 system (EPSG 4326)
epsg4326 = SpatialReference()
epsg4326.ImportFromEPSG(4326)
self.rd2latlon = CoordinateTransformation(epsg28992, epsg4326)
#latlon2rd = CoordinateTransformation(epsg4326, epsg28992)
# Check the transformation (in case of a missing towgs84)
#latlonz = rd2latlon.TransformPoint(155000.0, 446000.0)
#print latlonz # (5.387202946158022, 52.00237563479786, 43.6057764403522)
def cross_shore2lonlat(self, cs, transect_id, axis=None):
"""
returns WGS84 (lat,lon) coordinates (epsg 4326) for cross-shore coordinate(s) (wrt to RSP)
"""
x,y = self.cross_shore2xyRD(cs, transect_id, axis=axis)
self.initcc()
xy = zip(x,y)
lat,lon,_ = zip(*self.rd2latlon.TransformPoints(xy))
return lon,lat
def MKL(self, x=None, z=None, lower=-1, upper=3):
"""
volume based instantaneous shoreline position (momentane kustlijn ligging; MKL)
if x and z are provided, they should be 1D arrays.
if not, x (cross-shore) and z (altitude) are obtained using the available filter settings
"""
if (upper-lower)<=0:
            # boundaries have to be consistent (upper>lower)
logger.warning('No MKL can be derived with inconsistent boundaries (lower=%g, upper=%g)'%(lower,upper))
return None
from shapely.geometry import asShape
import shapely.geometry
if x is None and z is None:
x = self.get_data('cross_shore')
z = self.get_data('altitude')
xMKL = np.ones(z.shape[:2]) * np.nan
zMKL = np.ones(z.shape[:2]) * np.nan
for it in np.arange(z.shape[0]):
for il in np.arange(z.shape[1]):
mask = z[it,il,].mask
result = self.MKL(x=x[~mask], z=z[it,il,].data[~mask], lower=lower, upper=upper)
if result:
xMKL[it,il] = result['mkl'][0]
zMKL[it,il] = result['mkl'][1]
return xMKL,zMKL
# try:
# shapelock.acquire()
        if hasattr(z, 'mask'):
            logger.debug('only non-masked values are retained')
            x = x[~z.mask]
            z = z.data[~z.mask]
if len(x) < 3:
            logger.debug('x vector has only %i elements where at least 3 are required', len(x))
return None
# look up coordinates
X = np.c_[x, z]
# define an interpolation function
f = interp1d(x, z, kind='linear',bounds_error=False, copy=True)
# convert them to a shape
# look up the bounds of the profile
min_x = x.min()
min_z = z.min()
max_x = x.max()
# we do not want any double points, cause that invalidates a polygon (SFS)
# go down one extra, because we don't want to go backward through the same points
coords = np.r_[X,[[max_x, min_z-1],[min_x, min_z-1], X[0,:]]]
# poly_x = asShape(shapely.geometry.asPolygon(coords))
poly_x = shapely.geometry.Polygon(coords.astype('float'))
assert poly_x.is_valid
# look up the lower intersections with the lower and upper boundary
# lower
line_lower = asShape(shapely.geometry.asLineString([[min_x, lower], [max_x, lower]]))
assert line_lower.is_valid
intersects_lower = (line_lower.intersection(poly_x))
assert intersects_lower.is_valid
# upper
line_upper = asShape(shapely.geometry.asLineString([[min_x, upper], [max_x, upper]]))
assert line_upper.is_valid
intersects_upper = (line_upper.intersection(poly_x))
assert intersects_upper.is_valid
if intersects_lower.is_empty or intersects_upper.is_empty:
logger.debug('one or both boundaries does not intersect with profile')
return None
# by using the bounds, the number of intersections doesn't matter
swb = intersects_lower.bounds[2]
lwb = intersects_upper.bounds[2]
# calculate mkl using maximum method
boundary_box = shapely.geometry.asPolygon([[lwb,upper], [lwb, lower], [swb,lower], [swb, upper], [lwb,upper]])
mkl_volume = boundary_box.intersection(poly_x)
if boundary_box.area+mkl_volume.area == 0:
return None
mkl_x = lwb + (swb-lwb)*(mkl_volume.area/(boundary_box.area+mkl_volume.area))
mkl_y = f(mkl_x)
result = {}
result['mkl'] = asarray([mkl_x, mkl_y])
result['lwb'] = asarray([lwb, upper])
result['swb'] = asarray([swb, lower])
result['mkl_volume'] = mkl_volume
result['X'] = X
# finally:
# shapelock.release()
return result
def get_jrk(self):
"""
Convert current selection of data to .jrk string
"""
fmt = '%6i%6i3'
years = self.time2year(self.get_data('time'))
z = self.get_data('altitude')
o = self.get_data('origin')
aids = self.get_data('id')
x = self.get_data('cross_shore')
s = ''
for ia,aid in enumerate(aids):
for i,year in enumerate(years):
zc = np.ma.masked_invalid(np.squeeze(z[i,ia,:]))
                idx = ~np.ma.getmaskarray(zc)
nx = np.count_nonzero(idx)
if nx == 0:
continue
zc = zc[idx]*100
xc = x[idx]
data = list(zip(xc, zc))
if not nx%5 == 0:
# fill incomplete rows with dummy values
dummyvals = [(99999, 999999)] * (5-nx%5)
data = data + dummyvals
# create header line
s = '%s%6i%6i%6i%6i%6i%6i%6i\n'%(s, (aid-aid%1e6)/1e6, year, aid%1e6, 0, 0, 0, nx)
                for j,d in enumerate(data):
                    if d == (99999, 999999):
                        fmt = '%6i%6i9'
                    else:
                        # add code 3 (interpolated) to all
                        fmt = '%6i%6i3'
                    # TODO: use actual code
                    s = s + fmt%d
if (j+1)%5==0:
s = '%s\n'%s
else:
s = '%s '%s
return s
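if __name__ == '__main__':
    # Hedged usage sketch (not in the original module): this contacts the
    # public OPeNDAP server configured above, so it only runs with network
    # access; the transect id and year below are illustrative values, not
    # guaranteed to exist in the dataset.
    tr = Transects(id=7003800, year=2010)
    x = np.asarray(tr.get_data('cross_shore'))
    z = np.squeeze(tr.get_data('altitude'))
    result = tr.MKL(x=x, z=z, lower=-1, upper=3)
    if result is not None:
        print 'MKL cross-shore position: %s' % str(result['mkl'])
    tr.close()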
|
openearth/jarkus
|
jarkus/transects.py
|
Python
|
gpl-3.0
| 12,785
|
[
"NetCDF"
] |
128ad1ac6f6962ec39d6e95d9dd72097108eac3a62bc5c168aed06a2aa377bf3
|
#
# Copyright (C) 2008, Brian Tanner
#
# http://rl-glue-ext.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import rlglued.rlglue as rlglue
which_episode = 0
def run_episode(obj, step_limit):
global which_episode
terminal = obj.run_episode(step_limit)
total_steps = obj.num_steps()
total_reward = obj.reward_return()
print "Episode " + str(which_episode) + "\t " + str(total_steps) + " steps \t" + str(
total_reward) + " total reward\t " + str(terminal) + " natural end"
which_episode += 1
# Main Program starts here
rl_glue = rlglue.RLGlue()
print "\n\nExperiment starting up!"
task_spec = rl_glue.init()
print "RL_init called, the environment sent task spec: " + task_spec
print "\n\n----------Sending some sample messages----------"
# Talk to the agent and environment a bit...
responseMessage = rl_glue.agent_message("what is your name?")
print "Agent responded to \"what is your name?\" with: " + responseMessage
responseMessage = rl_glue.agent_message("If at first you don't succeed; call it version 1.0")
print "Agent responded to \"If at first you don't succeed; call it version 1.0 \" with: " + responseMessage + "\n"
responseMessage = rl_glue.env_message("what is your name?")
print "Environment responded to \"what is your name?\" with: " + responseMessage
responseMessage = rl_glue.env_message("If at first you don't succeed; call it version 1.0")
print "Environment responded to \"If at first you don't succeed; call it version 1.0 \" with: " + responseMessage
print "\n\n----------Running a few episodes----------"
run_episode(rl_glue, 100)
run_episode(rl_glue, 100)
run_episode(rl_glue, 100)
run_episode(rl_glue, 100)
run_episode(rl_glue, 100)
run_episode(rl_glue, 1)
# Remember that stepLimit of 0 means there is no limit at all!
run_episode(rl_glue, 0)
rl_glue.cleanup()
print "\n\n----------Stepping through an episode----------"
# We could also start over and do another experiment
task_spec = rl_glue.init()
# We could run one step at a time instead of one episode at a time
# Start the episode
start_response = rl_glue.start()
first_obs = start_response.o.intArray[0]
first_act = start_response.a.intArray[0]
print "First observation and action were: " + str(first_obs) + " and: " + str(first_act)
# Run one step
stepResponse = rl_glue.step()
# Run until the episode ends
while stepResponse.terminal != 1:
stepResponse = rl_glue.step()
# We could optionally print the (observation, action) pairs here, e.g.:
#   print "(%d,%d)" % (stepResponse.o.intArray[0], stepResponse.a.intArray[0])
print "\n\n----------Summary----------"
totalSteps = rl_glue.num_steps()
totalReward = rl_glue.reward_return()
print "It ran for " + str(totalSteps) + " steps, total reward was: " + str(totalReward)
rl_glue.cleanup()
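# A reusable sketch (not part of the original experiment): the same
# step-through loop as above, wrapped in a function so other experiments can
# reuse it. It assumes rl_glue.init() has been called beforehand.
def step_through_episode(rl_glue):
    rl_glue.start()
    steps = 0
    step_response = rl_glue.step()
    steps += 1
    while step_response.terminal != 1:
        step_response = rl_glue.step()
        steps += 1
    return steps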
|
evenmarbles/rlglued
|
examples/skeleton/skeletonexperiment.py
|
Python
|
bsd-3-clause
| 3,325
|
[
"Brian"
] |
16547531c7b8dc8ae8674d9b74c3105bdaac7da53d7af839dfaca33ea58fe027
|
#
# libtcod 1.5.2 python wrapper
# Copyright (c) 2008,2009,2010 Jice & Mingos
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * The name of Jice or Mingos may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY JICE AND MINGOS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL JICE OR MINGOS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import sys
import ctypes
import struct
from ctypes import *
if not hasattr(ctypes, "c_bool"): # for Python < 2.6
c_bool = c_uint8
try: #import NumPy if available
import numpy
numpy_available = True
except ImportError:
numpy_available = False
LINUX=False
MAC=False
HAIKU=False
MINGW=False
MSVC=False
path=os.path.dirname(os.path.abspath(__file__))
if sys.platform.find('linux') != -1:
_lib = ctypes.cdll[path + '/libtcod.so']
LINUX=True
elif sys.platform.find('darwin') != -1:
_lib = ctypes.cdll[path + '/libtcod.dylib']
MAC = True
elif sys.platform.find('haiku') != -1:
_lib = ctypes.cdll[path + '/libtcod.so']
HAIKU = True
else:
try:
_lib = ctypes.cdll[path + '/libtcod-mingw.dll']
MINGW=True
except WindowsError:
_lib = ctypes.cdll[path + '/libtcod-VS.dll']
MSVC=True
# On Windows, ctypes doesn't work well with functions returning structs,
# so we have to use the _wrapper functions instead
_lib.TCOD_color_multiply = _lib.TCOD_color_multiply_wrapper
_lib.TCOD_color_add = _lib.TCOD_color_add_wrapper
_lib.TCOD_color_multiply_scalar = _lib.TCOD_color_multiply_scalar_wrapper
_lib.TCOD_color_subtract = _lib.TCOD_color_subtract_wrapper
_lib.TCOD_color_lerp = _lib.TCOD_color_lerp_wrapper
_lib.TCOD_console_get_default_background = _lib.TCOD_console_get_default_background_wrapper
_lib.TCOD_console_get_default_foreground = _lib.TCOD_console_get_default_foreground_wrapper
_lib.TCOD_console_get_char_background = _lib.TCOD_console_get_char_background_wrapper
_lib.TCOD_console_get_char_foreground = _lib.TCOD_console_get_char_foreground_wrapper
_lib.TCOD_console_get_fading_color = _lib.TCOD_console_get_fading_color_wrapper
_lib.TCOD_image_get_pixel = _lib.TCOD_image_get_pixel_wrapper
_lib.TCOD_image_get_mipmap_pixel = _lib.TCOD_image_get_mipmap_pixel_wrapper
_lib.TCOD_parser_get_color_property = _lib.TCOD_parser_get_color_property_wrapper
HEXVERSION = 0x010502
STRVERSION = "1.5.2"
TECHVERSION = 0x01050200
############################
# color module
############################
class Color(Structure):
_fields_ = [('r', c_uint8),
('g', c_uint8),
('b', c_uint8),
]
def __eq__(self, c):
return _lib.TCOD_color_equals(self, c)
def __mul__(self, c):
if isinstance(c,Color):
return _lib.TCOD_color_multiply(self, c)
else:
return _lib.TCOD_color_multiply_scalar(self, c_float(c))
def __add__(self, c):
return _lib.TCOD_color_add(self, c)
def __sub__(self, c):
return _lib.TCOD_color_subtract(self, c)
def __repr__(self):
return "Color(%d,%d,%d)" % (self.r, self.g, self.b)
def __getitem__(self, i):
if type(i) == str:
return getattr(self, i)
else:
return getattr(self, "rgb"[i])
def __setitem__(self, i, c):
if type(i) == str:
setattr(self, i, c)
else:
setattr(self, "rgb"[i], c)
def __iter__(self):
yield self.r
yield self.g
yield self.b
# These restype assignments should be valid on any platform; they must be set after Color is defined.
if MAC:
from cprotos import setup_protos
setup_protos(_lib)
_lib.TCOD_color_equals.restype = c_bool
_lib.TCOD_color_multiply.restype = Color
_lib.TCOD_color_multiply_scalar.restype = Color
_lib.TCOD_color_add.restype = Color
_lib.TCOD_color_subtract.restype = Color
# default colors
# grey levels
black=Color(0,0,0)
darkest_grey=Color(31,31,31)
darker_grey=Color(63,63,63)
dark_grey=Color(95,95,95)
grey=Color(127,127,127)
light_grey=Color(159,159,159)
lighter_grey=Color(191,191,191)
lightest_grey=Color(223,223,223)
darkest_gray=Color(31,31,31)
darker_gray=Color(63,63,63)
dark_gray=Color(95,95,95)
gray=Color(127,127,127)
light_gray=Color(159,159,159)
lighter_gray=Color(191,191,191)
lightest_gray=Color(223,223,223)
white=Color(255,255,255)
# sepia
darkest_sepia=Color(31,24,15)
darker_sepia=Color(63,50,31)
dark_sepia=Color(94,75,47)
sepia=Color(127,101,63)
light_sepia=Color(158,134,100)
lighter_sepia=Color(191,171,143)
lightest_sepia=Color(222,211,195)
#standard colors
red=Color(255,0,0)
flame=Color(255,63,0)
orange=Color(255,127,0)
amber=Color(255,191,0)
yellow=Color(255,255,0)
lime=Color(191,255,0)
chartreuse=Color(127,255,0)
green=Color(0,255,0)
sea=Color(0,255,127)
turquoise=Color(0,255,191)
cyan=Color(0,255,255)
sky=Color(0,191,255)
azure=Color(0,127,255)
blue=Color(0,0,255)
han=Color(63,0,255)
violet=Color(127,0,255)
purple=Color(191,0,255)
fuchsia=Color(255,0,255)
magenta=Color(255,0,191)
pink=Color(255,0,127)
crimson=Color(255,0,63)
# dark colors
dark_red=Color(191,0,0)
dark_flame=Color(191,47,0)
dark_orange=Color(191,95,0)
dark_amber=Color(191,143,0)
dark_yellow=Color(191,191,0)
dark_lime=Color(143,191,0)
dark_chartreuse=Color(95,191,0)
dark_green=Color(0,191,0)
dark_sea=Color(0,191,95)
dark_turquoise=Color(0,191,143)
dark_cyan=Color(0,191,191)
dark_sky=Color(0,143,191)
dark_azure=Color(0,95,191)
dark_blue=Color(0,0,191)
dark_han=Color(47,0,191)
dark_violet=Color(95,0,191)
dark_purple=Color(143,0,191)
dark_fuchsia=Color(191,0,191)
dark_magenta=Color(191,0,143)
dark_pink=Color(191,0,95)
dark_crimson=Color(191,0,47)
# darker colors
darker_red=Color(127,0,0)
darker_flame=Color(127,31,0)
darker_orange=Color(127,63,0)
darker_amber=Color(127,95,0)
darker_yellow=Color(127,127,0)
darker_lime=Color(95,127,0)
darker_chartreuse=Color(63,127,0)
darker_green=Color(0,127,0)
darker_sea=Color(0,127,63)
darker_turquoise=Color(0,127,95)
darker_cyan=Color(0,127,127)
darker_sky=Color(0,95,127)
darker_azure=Color(0,63,127)
darker_blue=Color(0,0,127)
darker_han=Color(31,0,127)
darker_violet=Color(63,0,127)
darker_purple=Color(95,0,127)
darker_fuchsia=Color(127,0,127)
darker_magenta=Color(127,0,95)
darker_pink=Color(127,0,63)
darker_crimson=Color(127,0,31)
# darkest colors
darkest_red=Color(63,0,0)
darkest_flame=Color(63,15,0)
darkest_orange=Color(63,31,0)
darkest_amber=Color(63,47,0)
darkest_yellow=Color(63,63,0)
darkest_lime=Color(47,63,0)
darkest_chartreuse=Color(31,63,0)
darkest_green=Color(0,63,0)
darkest_sea=Color(0,63,31)
darkest_turquoise=Color(0,63,47)
darkest_cyan=Color(0,63,63)
darkest_sky=Color(0,47,63)
darkest_azure=Color(0,31,63)
darkest_blue=Color(0,0,63)
darkest_han=Color(15,0,63)
darkest_violet=Color(31,0,63)
darkest_purple=Color(47,0,63)
darkest_fuchsia=Color(63,0,63)
darkest_magenta=Color(63,0,47)
darkest_pink=Color(63,0,31)
darkest_crimson=Color(63,0,15)
# light colors
light_red=Color(255,114,114)
light_flame=Color(255,149,114)
light_orange=Color(255,184,114)
light_amber=Color(255,219,114)
light_yellow=Color(255,255,114)
light_lime=Color(219,255,114)
light_chartreuse=Color(184,255,114)
light_green=Color(114,255,114)
light_sea=Color(114,255,184)
light_turquoise=Color(114,255,219)
light_cyan=Color(114,255,255)
light_sky=Color(114,219,255)
light_azure=Color(114,184,255)
light_blue=Color(114,114,255)
light_han=Color(149,114,255)
light_violet=Color(184,114,255)
light_purple=Color(219,114,255)
light_fuchsia=Color(255,114,255)
light_magenta=Color(255,114,219)
light_pink=Color(255,114,184)
light_crimson=Color(255,114,149)
#lighter colors
lighter_red=Color(255,165,165)
lighter_flame=Color(255,188,165)
lighter_orange=Color(255,210,165)
lighter_amber=Color(255,232,165)
lighter_yellow=Color(255,255,165)
lighter_lime=Color(232,255,165)
lighter_chartreuse=Color(210,255,165)
lighter_green=Color(165,255,165)
lighter_sea=Color(165,255,210)
lighter_turquoise=Color(165,255,232)
lighter_cyan=Color(165,255,255)
lighter_sky=Color(165,232,255)
lighter_azure=Color(165,210,255)
lighter_blue=Color(165,165,255)
lighter_han=Color(188,165,255)
lighter_violet=Color(210,165,255)
lighter_purple=Color(232,165,255)
lighter_fuchsia=Color(255,165,255)
lighter_magenta=Color(255,165,232)
lighter_pink=Color(255,165,210)
lighter_crimson=Color(255,165,188)
# lightest colors
lightest_red=Color(255,191,191)
lightest_flame=Color(255,207,191)
lightest_orange=Color(255,223,191)
lightest_amber=Color(255,239,191)
lightest_yellow=Color(255,255,191)
lightest_lime=Color(239,255,191)
lightest_chartreuse=Color(223,255,191)
lightest_green=Color(191,255,191)
lightest_sea=Color(191,255,223)
lightest_turquoise=Color(191,255,239)
lightest_cyan=Color(191,255,255)
lightest_sky=Color(191,239,255)
lightest_azure=Color(191,223,255)
lightest_blue=Color(191,191,255)
lightest_han=Color(207,191,255)
lightest_violet=Color(223,191,255)
lightest_purple=Color(239,191,255)
lightest_fuchsia=Color(255,191,255)
lightest_magenta=Color(255,191,239)
lightest_pink=Color(255,191,223)
lightest_crimson=Color(255,191,207)
# desaturated colors
desaturated_red=Color(127,63,63)
desaturated_flame=Color(127,79,63)
desaturated_orange=Color(127,95,63)
desaturated_amber=Color(127,111,63)
desaturated_yellow=Color(127,127,63)
desaturated_lime=Color(111,127,63)
desaturated_chartreuse=Color(95,127,63)
desaturated_green=Color(63,127,63)
desaturated_sea=Color(63,127,95)
desaturated_turquoise=Color(63,127,111)
desaturated_cyan=Color(63,127,127)
desaturated_sky=Color(63,111,127)
desaturated_azure=Color(63,95,127)
desaturated_blue=Color(63,63,127)
desaturated_han=Color(79,63,127)
desaturated_violet=Color(95,63,127)
desaturated_purple=Color(111,63,127)
desaturated_fuchsia=Color(127,63,127)
desaturated_magenta=Color(127,63,111)
desaturated_pink=Color(127,63,95)
desaturated_crimson=Color(127,63,79)
# metallic
brass=Color(191,151,96)
copper=Color(197,136,124)
gold=Color(229,191,0)
silver=Color(203,203,203)
# miscellaneous
celadon=Color(172,255,175)
peach=Color(255,159,127)
# color functions
_lib.TCOD_color_lerp.restype = Color
def color_lerp(c1, c2, a):
return _lib.TCOD_color_lerp(c1, c2, c_float(a))
def color_set_hsv(c, h, s, v):
_lib.TCOD_color_set_HSV(byref(c), c_float(h), c_float(s), c_float(v))
def color_get_hsv(c):
h = c_float()
s = c_float()
v = c_float()
_lib.TCOD_color_get_HSV(c, byref(h), byref(s), byref(v))
return h.value, s.value, v.value
def color_scale_HSV(c, scoef, vcoef) :
_lib.TCOD_color_scale_HSV(byref(c),c_float(scoef),c_float(vcoef))
def color_gen_map(colors, indexes):
ccolors = (Color * len(colors))(*colors)
cindexes = (c_int * len(indexes))(*indexes)
cres = (Color * (max(indexes) + 1))()
_lib.TCOD_color_gen_map(cres, len(colors), ccolors, cindexes)
return cres
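# Hypothetical sketch: color_gen_map interpolates between key colors placed
# at the given indexes; e.g. a 256-entry black -> red -> white heat map
# (wrapped in a function so importing this module stays side-effect free).
def _heat_gradient_example():
    return color_gen_map([black, red, white], [0, 128, 255])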
############################
# console module
############################
class Key(Structure):
_fields_=[('vk', c_int),
('c', c_uint8),
('pressed', c_bool),
('lalt', c_bool),
('lctrl', c_bool),
('ralt', c_bool),
('rctrl', c_bool),
('shift', c_bool),
]
class ConsoleBuffer:
# simple console that allows direct (fast) access to cells. simplifies
# use of the "fill" functions.
def __init__(self, width, height, back_r=0, back_g=0, back_b=0, fore_r=0, fore_g=0, fore_b=0, char=' '):
# initialize with given width and height. values to fill the buffer
# are optional, defaults to black with no characters.
self.width = width
self.height = height
self.clear(back_r, back_g, back_b, fore_r, fore_g, fore_b, char)
def clear(self, back_r=0, back_g=0, back_b=0, fore_r=0, fore_g=0, fore_b=0, char=' '):
# clears the console. values to fill it with are optional, defaults
# to black with no characters.
n = self.width * self.height
self.back_r = [back_r] * n
self.back_g = [back_g] * n
self.back_b = [back_b] * n
self.fore_r = [fore_r] * n
self.fore_g = [fore_g] * n
self.fore_b = [fore_b] * n
self.char = [ord(char)] * n
def copy(self):
# returns a copy of this ConsoleBuffer.
other = ConsoleBuffer(0, 0)
other.width = self.width
other.height = self.height
other.back_r = list(self.back_r) # make explicit copies of all lists
other.back_g = list(self.back_g)
other.back_b = list(self.back_b)
other.fore_r = list(self.fore_r)
other.fore_g = list(self.fore_g)
other.fore_b = list(self.fore_b)
other.char = list(self.char)
return other
def set_fore(self, x, y, r, g, b, char):
# set the character and foreground color of one cell.
i = self.width * y + x
self.fore_r[i] = r
self.fore_g[i] = g
self.fore_b[i] = b
self.char[i] = ord(char)
def set_back(self, x, y, r, g, b):
# set the background color of one cell.
i = self.width * y + x
self.back_r[i] = r
self.back_g[i] = g
self.back_b[i] = b
def set(self, x, y, back_r, back_g, back_b, fore_r, fore_g, fore_b, char):
# set the background color, foreground color and character of one cell.
i = self.width * y + x
self.back_r[i] = back_r
self.back_g[i] = back_g
self.back_b[i] = back_b
self.fore_r[i] = fore_r
self.fore_g[i] = fore_g
self.fore_b[i] = fore_b
self.char[i] = ord(char)
def blit(self, dest, fill_fore=True, fill_back=True):
# use libtcod's "fill" functions to write the buffer to a console.
if (console_get_width(dest) != self.width or
console_get_height(dest) != self.height):
raise ValueError('ConsoleBuffer.blit: Destination console has an incorrect size.')
if fill_back:
_lib.TCOD_console_fill_background(dest, (c_int * len(self.back_r))(*self.back_r), (c_int * len(self.back_g))(*self.back_g), (c_int * len(self.back_b))(*self.back_b))
if fill_fore:
_lib.TCOD_console_fill_foreground(dest, (c_int * len(self.fore_r))(*self.fore_r), (c_int * len(self.fore_g))(*self.fore_g), (c_int * len(self.fore_b))(*self.fore_b))
_lib.TCOD_console_fill_char(dest, (c_int * len(self.char))(*self.char))
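# Hedged usage sketch for ConsoleBuffer (not in the original module; deferred
# in a function because it needs console_init_root to have been called and
# the native library plus a font to be available):
def _console_buffer_demo():
    con = console_new(10, 5)                 # offscreen console of matching size
    buf = ConsoleBuffer(10, 5, back_b=127)   # dark blue background
    buf.set_fore(2, 2, 255, 255, 255, '@')   # white '@' near the middle
    buf.blit(con)                            # uses the fill functions above
    console_blit(con, 0, 0, 10, 5, 0, 0, 0)  # copy onto the root console
    console_flush()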
_lib.TCOD_console_credits_render.restype = c_bool
_lib.TCOD_console_is_fullscreen.restype = c_bool
_lib.TCOD_console_is_window_closed.restype = c_bool
_lib.TCOD_console_has_mouse_focus.restype = c_bool
_lib.TCOD_console_is_active.restype = c_bool
_lib.TCOD_console_get_default_background.restype = Color
_lib.TCOD_console_get_default_foreground.restype = Color
_lib.TCOD_console_get_char_background.restype = Color
_lib.TCOD_console_get_char_foreground.restype = Color
_lib.TCOD_console_get_fading_color.restype = Color
_lib.TCOD_console_is_key_pressed.restype = c_bool
# background rendering modes
BKGND_NONE = 0
BKGND_SET = 1
BKGND_MULTIPLY = 2
BKGND_LIGHTEN = 3
BKGND_DARKEN = 4
BKGND_SCREEN = 5
BKGND_COLOR_DODGE = 6
BKGND_COLOR_BURN = 7
BKGND_ADD = 8
BKGND_ADDA = 9
BKGND_BURN = 10
BKGND_OVERLAY = 11
BKGND_ALPH = 12
BKGND_DEFAULT=13
def BKGND_ALPHA(a):
return BKGND_ALPH | (int(a * 255) << 8)
def BKGND_ADDALPHA(a):
return BKGND_ADDA | (int(a * 255) << 8)
# non blocking key events types
KEY_PRESSED = 1
KEY_RELEASED = 2
# key codes
KEY_NONE = 0
KEY_ESCAPE = 1
KEY_BACKSPACE = 2
KEY_TAB = 3
KEY_ENTER = 4
KEY_SHIFT = 5
KEY_CONTROL = 6
KEY_ALT = 7
KEY_PAUSE = 8
KEY_CAPSLOCK = 9
KEY_PAGEUP = 10
KEY_PAGEDOWN = 11
KEY_END = 12
KEY_HOME = 13
KEY_UP = 14
KEY_LEFT = 15
KEY_RIGHT = 16
KEY_DOWN = 17
KEY_PRINTSCREEN = 18
KEY_INSERT = 19
KEY_DELETE = 20
KEY_LWIN = 21
KEY_RWIN = 22
KEY_APPS = 23
KEY_0 = 24
KEY_1 = 25
KEY_2 = 26
KEY_3 = 27
KEY_4 = 28
KEY_5 = 29
KEY_6 = 30
KEY_7 = 31
KEY_8 = 32
KEY_9 = 33
KEY_KP0 = 34
KEY_KP1 = 35
KEY_KP2 = 36
KEY_KP3 = 37
KEY_KP4 = 38
KEY_KP5 = 39
KEY_KP6 = 40
KEY_KP7 = 41
KEY_KP8 = 42
KEY_KP9 = 43
KEY_KPADD = 44
KEY_KPSUB = 45
KEY_KPDIV = 46
KEY_KPMUL = 47
KEY_KPDEC = 48
KEY_KPENTER = 49
KEY_F1 = 50
KEY_F2 = 51
KEY_F3 = 52
KEY_F4 = 53
KEY_F5 = 54
KEY_F6 = 55
KEY_F7 = 56
KEY_F8 = 57
KEY_F9 = 58
KEY_F10 = 59
KEY_F11 = 60
KEY_F12 = 61
KEY_NUMLOCK = 62
KEY_SCROLLLOCK = 63
KEY_SPACE = 64
KEY_CHAR = 65
# special chars
# single walls
CHAR_HLINE = 196
CHAR_VLINE = 179
CHAR_NE = 191
CHAR_NW = 218
CHAR_SE = 217
CHAR_SW = 192
CHAR_TEEW = 180
CHAR_TEEE = 195
CHAR_TEEN = 193
CHAR_TEES = 194
CHAR_CROSS = 197
# double walls
CHAR_DHLINE = 205
CHAR_DVLINE = 186
CHAR_DNE = 187
CHAR_DNW = 201
CHAR_DSE = 188
CHAR_DSW = 200
CHAR_DTEEW = 185
CHAR_DTEEE = 204
CHAR_DTEEN = 202
CHAR_DTEES = 203
CHAR_DCROSS = 206
# blocks
CHAR_BLOCK1 = 176
CHAR_BLOCK2 = 177
CHAR_BLOCK3 = 178
# arrows
CHAR_ARROW_N = 24
CHAR_ARROW_S = 25
CHAR_ARROW_E = 26
CHAR_ARROW_W = 27
# arrows without tail
CHAR_ARROW2_N = 30
CHAR_ARROW2_S = 31
CHAR_ARROW2_E = 16
CHAR_ARROW2_W = 17
# double arrows
CHAR_DARROW_H = 29
CHAR_DARROW_V = 18
# GUI stuff
CHAR_CHECKBOX_UNSET = 224
CHAR_CHECKBOX_SET = 225
CHAR_RADIO_UNSET = 9
CHAR_RADIO_SET = 10
# sub-pixel resolution kit
CHAR_SUBP_NW = 226
CHAR_SUBP_NE = 227
CHAR_SUBP_N = 228
CHAR_SUBP_SE = 229
CHAR_SUBP_DIAG = 230
CHAR_SUBP_E = 231
CHAR_SUBP_SW = 232
# misc characters
CHAR_BULLET = 7
CHAR_BULLET_INV = 8
CHAR_BULLET_SQUARE = 254
CHAR_CENT = 189
CHAR_CLUB = 5
CHAR_COPYRIGHT = 184
CHAR_CURRENCY = 207
CHAR_DIAMOND = 4
CHAR_DIVISION = 246
CHAR_EXCLAM_DOUBLE = 19
CHAR_FEMALE = 12
CHAR_FUNCTION = 159
CHAR_GRADE = 248
CHAR_HALF = 171
CHAR_HEART = 3
CHAR_LIGHT = 15
CHAR_MALE = 11
CHAR_MULTIPLICATION = 158
CHAR_NOTE = 13
CHAR_NOTE_DOUBLE = 14
CHAR_ONE_QUARTER = 172
CHAR_PILCROW = 20
CHAR_POUND = 156
CHAR_POW1 = 251
CHAR_POW2 = 253
CHAR_POW3 = 252
CHAR_RESERVED = 169
CHAR_SECTION = 21
CHAR_SMILIE = 1
CHAR_SMILIE_INV = 2
CHAR_SPADE = 6
CHAR_THREE_QUARTERS = 243
CHAR_UMLAUT = 249
CHAR_YEN = 190
# font flags
FONT_LAYOUT_ASCII_INCOL = 1
FONT_LAYOUT_ASCII_INROW = 2
FONT_TYPE_GREYSCALE = 4
FONT_TYPE_GRAYSCALE = 4
FONT_LAYOUT_TCOD = 8
# color control codes
COLCTRL_1=1
COLCTRL_2=2
COLCTRL_3=3
COLCTRL_4=4
COLCTRL_5=5
COLCTRL_NUMBER=5
COLCTRL_FORE_RGB=6
COLCTRL_BACK_RGB=7
COLCTRL_STOP=8
# renderers
RENDERER_GLSL=0
RENDERER_OPENGL=1
RENDERER_SDL=2
NB_RENDERERS=3
# alignment
LEFT=0
RIGHT=1
CENTER=2
# initializing the console
def console_init_root(w, h, title, fullscreen=False, renderer=RENDERER_SDL):
_lib.TCOD_console_init_root(w, h, c_char_p(title), fullscreen, renderer)
def console_set_custom_font(fontFile, flags=FONT_LAYOUT_ASCII_INCOL, nb_char_horiz=0, nb_char_vertic=0):
_lib.TCOD_console_set_custom_font(c_char_p(fontFile), flags, nb_char_horiz, nb_char_vertic)
def console_map_ascii_code_to_font(asciiCode, fontCharX, fontCharY):
if type(asciiCode) == str or type(asciiCode) == bytes:
_lib.TCOD_console_map_ascii_code_to_font(ord(asciiCode), fontCharX,
fontCharY)
else:
_lib.TCOD_console_map_ascii_code_to_font(asciiCode, fontCharX,
fontCharY)
def console_map_ascii_codes_to_font(firstAsciiCode, nbCodes, fontCharX,
fontCharY):
if type(firstAsciiCode) == str or type(firstAsciiCode) == bytes:
_lib.TCOD_console_map_ascii_codes_to_font(ord(firstAsciiCode), nbCodes,
fontCharX, fontCharY)
else:
_lib.TCOD_console_map_ascii_codes_to_font(firstAsciiCode, nbCodes,
fontCharX, fontCharY)
def console_map_string_to_font(s, fontCharX, fontCharY):
if type(s) == bytes:
_lib.TCOD_console_map_string_to_font(s, fontCharX, fontCharY)
else:
_lib.TCOD_console_map_string_to_font_utf(s, fontCharX, fontCharY)
def console_is_fullscreen():
return _lib.TCOD_console_is_fullscreen()
def console_set_fullscreen(fullscreen):
_lib.TCOD_console_set_fullscreen(c_int(fullscreen))
def console_is_window_closed():
return _lib.TCOD_console_is_window_closed()
def console_has_mouse_focus():
return _lib.TCOD_console_has_mouse_focus()
def console_is_active():
return _lib.TCOD_console_is_active()
def console_set_window_title(title):
_lib.TCOD_console_set_window_title(c_char_p(title))
def console_credits():
_lib.TCOD_console_credits()
def console_credits_reset():
_lib.TCOD_console_credits_reset()
def console_credits_render(x, y, alpha):
return _lib.TCOD_console_credits_render(x, y, c_int(alpha))
def console_flush():
_lib.TCOD_console_flush()
# drawing on a console
def console_set_default_background(con, col):
_lib.TCOD_console_set_default_background(con, col)
def console_set_default_foreground(con, col):
_lib.TCOD_console_set_default_foreground(con, col)
def console_clear(con):
return _lib.TCOD_console_clear(con)
def console_put_char(con, x, y, c, flag=BKGND_DEFAULT):
if type(c) == str or type(c) == bytes:
_lib.TCOD_console_put_char(con, x, y, ord(c), flag)
else:
_lib.TCOD_console_put_char(con, x, y, c, flag)
def console_put_char_ex(con, x, y, c, fore, back):
if type(c) == str or type(c) == bytes:
_lib.TCOD_console_put_char_ex(con, x, y, ord(c), fore, back)
else:
_lib.TCOD_console_put_char_ex(con, x, y, c, fore, back)
def console_set_char_background(con, x, y, col, flag=BKGND_SET):
_lib.TCOD_console_set_char_background(con, x, y, col, flag)
def console_set_char_foreground(con, x, y, col):
_lib.TCOD_console_set_char_foreground(con, x, y, col)
def console_set_char(con, x, y, c):
if type(c) == str or type(c) == bytes:
_lib.TCOD_console_set_char(con, x, y, ord(c))
else:
_lib.TCOD_console_set_char(con, x, y, c)
def console_set_background_flag(con, flag):
_lib.TCOD_console_set_background_flag(con, c_int(flag))
def console_get_background_flag(con):
return _lib.TCOD_console_get_background_flag(con)
def console_set_alignment(con, alignment):
_lib.TCOD_console_set_alignment(con, c_int(alignment))
def console_get_alignment(con):
return _lib.TCOD_console_get_alignment(con)
def console_print(con, x, y, fmt):
if type(fmt) == bytes:
_lib.TCOD_console_print(c_void_p(con), x, y, c_char_p(fmt))
else:
_lib.TCOD_console_print_utf(c_void_p(con), x, y, fmt)
def console_print_ex(con, x, y, flag, alignment, fmt):
if type(fmt) == bytes:
_lib.TCOD_console_print_ex(c_void_p(con), x, y, flag, alignment, c_char_p(fmt))
else:
_lib.TCOD_console_print_ex_utf(c_void_p(con), x, y, flag, alignment, fmt)
def console_print_rect(con, x, y, w, h, fmt):
if type(fmt) == bytes:
return _lib.TCOD_console_print_rect(c_void_p(con), x, y, w, h, c_char_p(fmt))
else:
return _lib.TCOD_console_print_rect_utf(c_void_p(con), x, y, w, h, fmt)
def console_print_rect_ex(con, x, y, w, h, flag, alignment, fmt):
if type(fmt) == bytes:
return _lib.TCOD_console_print_rect_ex(c_void_p(con), x, y, w, h, flag, alignment, c_char_p(fmt))
else:
return _lib.TCOD_console_print_rect_ex_utf(c_void_p(con), x, y, w, h, flag, alignment, fmt)
def console_get_height_rect(con, x, y, w, h, fmt):
if type(fmt) == bytes:
return _lib.TCOD_console_get_height_rect(c_void_p(con), x, y, w, h, c_char_p(fmt))
else:
return _lib.TCOD_console_get_height_rect_utf(c_void_p(con), x, y, w, h, fmt)
def console_rect(con, x, y, w, h, clr, flag=BKGND_DEFAULT):
_lib.TCOD_console_rect(con, x, y, w, h, c_int(clr), flag)
def console_hline(con, x, y, l, flag=BKGND_DEFAULT):
_lib.TCOD_console_hline( con, x, y, l, flag)
def console_vline(con, x, y, l, flag=BKGND_DEFAULT):
_lib.TCOD_console_vline( con, x, y, l, flag)
def console_print_frame(con, x, y, w, h, clear=True, flag=BKGND_DEFAULT, fmt=0):
_lib.TCOD_console_print_frame(c_void_p(con), x, y, w, h, c_int(clear), flag, c_char_p(fmt))
def console_set_color_control(con,fore,back) :
_lib.TCOD_console_set_color_control(con,fore,back)
def console_get_default_background(con):
return _lib.TCOD_console_get_default_background(con)
def console_get_default_foreground(con):
return _lib.TCOD_console_get_default_foreground(con)
def console_get_char_background(con, x, y):
return _lib.TCOD_console_get_char_background(con, x, y)
def console_get_char_foreground(con, x, y):
return _lib.TCOD_console_get_char_foreground(con, x, y)
def console_get_char(con, x, y):
return _lib.TCOD_console_get_char(con, x, y)
def console_set_fade(fade, fadingColor):
_lib.TCOD_console_set_fade(fade, fadingColor)
##_lib.TCOD_console_set_fade_wrapper(fade, fadingColor)
def console_get_fade():
return _lib.TCOD_console_get_fade().value
def console_get_fading_color():
return _lib.TCOD_console_get_fading_color()
# handling keyboard input
def console_wait_for_keypress(flush):
k=Key()
_lib.TCOD_console_wait_for_keypress_wrapper(byref(k),c_bool(flush))
return k
def console_check_for_keypress(flags=KEY_RELEASED):
k=Key()
_lib.TCOD_console_check_for_keypress_wrapper(byref(k),c_int(flags))
return k
def console_is_key_pressed(key):
return _lib.TCOD_console_is_key_pressed(key)
def console_set_keyboard_repeat(initial_delay, interval):
_lib.TCOD_console_set_keyboard_repeat(initial_delay, interval)
def console_disable_keyboard_repeat():
_lib.TCOD_console_disable_keyboard_repeat()
# using offscreen consoles
def console_new(w, h):
return _lib.TCOD_console_new(w, h)
def console_from_file(filename):
return _lib.TCOD_console_from_file(filename)
def console_get_width(con):
return _lib.TCOD_console_get_width(con)
def console_get_height(con):
return _lib.TCOD_console_get_height(con)
def console_blit(src, x, y, w, h, dst, xdst, ydst, ffade=1.0,bfade=1.0):
_lib.TCOD_console_blit(src, x, y, w, h, dst, xdst, ydst, c_float(ffade), c_float(bfade))
def console_set_key_color(con, col):
_lib.TCOD_console_set_key_color(con, col)
def console_delete(con):
_lib.TCOD_console_delete(con)
# fast color filling
def console_fill_foreground(con,r,g,b) :
if len(r) != len(g) or len(r) != len(b):
raise TypeError('R, G and B must all have the same size.')
if (numpy_available and isinstance(r, numpy.ndarray) and
isinstance(g, numpy.ndarray) and isinstance(b, numpy.ndarray)):
#numpy arrays, use numpy's ctypes functions
r = numpy.ascontiguousarray(r, dtype=numpy.int32)
g = numpy.ascontiguousarray(g, dtype=numpy.int32)
b = numpy.ascontiguousarray(b, dtype=numpy.int32)
cr = r.ctypes.data_as(POINTER(c_int))
cg = g.ctypes.data_as(POINTER(c_int))
cb = b.ctypes.data_as(POINTER(c_int))
else:
# otherwise convert using ctypes arrays
cr = (c_int * len(r))(*r)
cg = (c_int * len(g))(*g)
cb = (c_int * len(b))(*b)
_lib.TCOD_console_fill_foreground(con, cr, cg, cb)
def console_fill_background(con,r,g,b) :
if len(r) != len(g) or len(r) != len(b):
raise TypeError('R, G and B must all have the same size.')
if (numpy_available and isinstance(r, numpy.ndarray) and
isinstance(g, numpy.ndarray) and isinstance(b, numpy.ndarray)):
#numpy arrays, use numpy's ctypes functions
r = numpy.ascontiguousarray(r, dtype=numpy.int32)
g = numpy.ascontiguousarray(g, dtype=numpy.int32)
b = numpy.ascontiguousarray(b, dtype=numpy.int32)
cr = r.ctypes.data_as(POINTER(c_int))
cg = g.ctypes.data_as(POINTER(c_int))
cb = b.ctypes.data_as(POINTER(c_int))
else:
# otherwise convert using ctypes arrays
cr = (c_int * len(r))(*r)
cg = (c_int * len(g))(*g)
cb = (c_int * len(b))(*b)
_lib.TCOD_console_fill_background(con, cr, cg, cb)
def console_fill_char(con,arr) :
if (numpy_available and isinstance(arr, numpy.ndarray) ):
#numpy arrays, use numpy's ctypes functions
arr = numpy.ascontiguousarray(arr, dtype=numpy.int32)
carr = arr.ctypes.data_as(POINTER(c_int))
else:
#otherwise convert using the struct module
carr = struct.pack('%di' % len(arr), *arr)
_lib.TCOD_console_fill_char(con, carr)
def console_load_asc(con, filename) :
_lib.TCOD_console_load_asc(con,filename)
def console_save_asc(con, filename) :
_lib.TCOD_console_save_asc(con,filename)
def console_load_apf(con, filename) :
_lib.TCOD_console_load_apf(con,filename)
def console_save_apf(con, filename) :
_lib.TCOD_console_save_apf(con,filename)
############################
# sys module
############################
_lib.TCOD_sys_get_last_frame_length.restype = c_float
_lib.TCOD_sys_elapsed_seconds.restype = c_float
# high precision time functions
def sys_set_fps(fps):
_lib.TCOD_sys_set_fps(fps)
def sys_get_fps():
return _lib.TCOD_sys_get_fps()
def sys_get_last_frame_length():
return _lib.TCOD_sys_get_last_frame_length()
def sys_sleep_milli(val):
_lib.TCOD_sys_sleep_milli(c_uint(val))
def sys_elapsed_milli():
return _lib.TCOD_sys_elapsed_milli()
def sys_elapsed_seconds():
return _lib.TCOD_sys_elapsed_seconds()
def sys_set_renderer(renderer):
_lib.TCOD_sys_set_renderer(renderer)
def sys_get_renderer():
return _lib.TCOD_sys_get_renderer()
# easy screenshots
def sys_save_screenshot(name=0):
_lib.TCOD_sys_save_screenshot(c_char_p(name))
# custom fullscreen resolution
def sys_force_fullscreen_resolution(width, height):
_lib.TCOD_sys_force_fullscreen_resolution(width, height)
def sys_get_current_resolution():
w = c_int()
h = c_int()
_lib.TCOD_sys_get_current_resolution(byref(w), byref(h))
return w.value, h.value
def sys_get_char_size():
w = c_int()
h = c_int()
_lib.TCOD_sys_get_char_size(byref(w), byref(h))
return w.value, h.value
# update font bitmap
def sys_update_char(asciiCode, fontx, fonty, img, x, y) :
_lib.TCOD_sys_update_char(c_int(asciiCode),c_int(fontx),c_int(fonty),img,c_int(x),c_int(y))
# custom SDL post renderer
SDL_RENDERER_FUNC = CFUNCTYPE(None, c_void_p)
def sys_register_SDL_renderer(callback):
global sdl_renderer_func
sdl_renderer_func = SDL_RENDERER_FUNC(callback)
_lib.TCOD_sys_register_SDL_renderer(sdl_renderer_func)
# events
EVENT_NONE=0
EVENT_KEY_PRESS=1
EVENT_KEY_RELEASE=2
EVENT_KEY=EVENT_KEY_PRESS|EVENT_KEY_RELEASE
EVENT_MOUSE_MOVE=4
EVENT_MOUSE_PRESS=8
EVENT_MOUSE_RELEASE=16
EVENT_MOUSE=EVENT_MOUSE_MOVE|EVENT_MOUSE_PRESS|EVENT_MOUSE_RELEASE
EVENT_ANY=EVENT_KEY|EVENT_MOUSE
def sys_check_for_event(mask,k,m) :
return _lib.TCOD_sys_check_for_event(c_int(mask),byref(k),byref(m))
def sys_wait_for_event(mask,k,m,flush) :
return _lib.TCOD_sys_wait_for_event(c_int(mask),byref(k),byref(m),c_bool(flush))
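# Illustrative sketch (not part of the original bindings): a minimal polling
# loop built on sys_check_for_event. Assumes the Key structure and
# console_is_window_closed defined elsewhere in this module, plus an
# initialized root console; Mouse is defined in the mouse module below.
def _example_event_loop():
    key = Key()
    mouse = Mouse()
    while not console_is_window_closed():
        ev = sys_check_for_event(EVENT_KEY_PRESS | EVENT_MOUSE, key, mouse)
        if ev & EVENT_KEY_PRESS:
            print('key pressed: vk=%d' % key.vk)
        if ev & EVENT_MOUSE_MOVE:
            print('mouse at cell (%d, %d)' % (mouse.cx, mouse.cy))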
############################
# line module
############################
_lib.TCOD_line_step.restype = c_bool
_lib.TCOD_line.restype=c_bool
_lib.TCOD_line_step_mt.restype = c_bool
def line_init(xo, yo, xd, yd):
_lib.TCOD_line_init(xo, yo, xd, yd)
def line_step():
x = c_int()
y = c_int()
ret = _lib.TCOD_line_step(byref(x), byref(y))
if not ret:
return x.value, y.value
return None,None
def line(xo,yo,xd,yd,py_callback) :
LINE_CBK_FUNC=CFUNCTYPE(c_bool,c_int,c_int)
c_callback=LINE_CBK_FUNC(py_callback)
return _lib.TCOD_line(xo,yo,xd,yd,c_callback)
def line_iter(xo, yo, xd, yd):
data = (c_int * 9)() # struct TCOD_bresenham_data_t
_lib.TCOD_line_init_mt(xo, yo, xd, yd, data)
x = c_int(xo)
y = c_int(yo)
done = False
while not done:
yield x.value, y.value
done = _lib.TCOD_line_step_mt(byref(x), byref(y), data)
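# Illustrative sketch (not part of the original bindings): walk every cell on
# a Bresenham line using the thread-safe iterator above; both endpoints are
# yielded, origin first.
def _example_line_iter(xo=0, yo=0, xd=5, yd=3):
    for x, y in line_iter(xo, yo, xd, yd):
        print('line cell: (%d, %d)' % (x, y))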
############################
# image module
############################
_lib.TCOD_image_is_pixel_transparent.restype = c_bool
_lib.TCOD_image_get_pixel.restype = Color
_lib.TCOD_image_get_mipmap_pixel.restype = Color
def image_new(width, height):
return _lib.TCOD_image_new(width, height)
def image_clear(image,col) :
_lib.TCOD_image_clear(image,col)
def image_invert(image) :
_lib.TCOD_image_invert(image)
def image_hflip(image) :
_lib.TCOD_image_hflip(image)
def image_rotate90(image, num=1) :
_lib.TCOD_image_rotate90(image,num)
def image_vflip(image) :
_lib.TCOD_image_vflip(image)
def image_scale(image, neww, newh) :
_lib.TCOD_image_scale(image,c_int(neww),c_int(newh))
def image_set_key_color(image,col) :
_lib.TCOD_image_set_key_color(image,col)
def image_get_alpha(image,x,y) :
return _lib.TCOD_image_get_alpha(image,c_int(x),c_int(y))
def image_is_pixel_transparent(image,x,y) :
return _lib.TCOD_image_is_pixel_transparent(image,c_int(x),c_int(y))
def image_load(filename):
return _lib.TCOD_image_load(c_char_p(filename))
def image_from_console(console):
return _lib.TCOD_image_from_console(console)
def image_refresh_console(image, console):
_lib.TCOD_image_refresh_console(image, console)
def image_get_size(image):
w=c_int()
h=c_int()
_lib.TCOD_image_get_size(image, byref(w), byref(h))
return w.value, h.value
def image_get_pixel(image, x, y):
return _lib.TCOD_image_get_pixel(image, x, y)
def image_get_mipmap_pixel(image, x0, y0, x1, y1):
return _lib.TCOD_image_get_mipmap_pixel(image, c_float(x0), c_float(y0),
c_float(x1), c_float(y1))
def image_put_pixel(image, x, y, col):
_lib.TCOD_image_put_pixel(image, x, y, col)
##_lib.TCOD_image_put_pixel_wrapper(image, x, y, col)
def image_blit(image, console, x, y, bkgnd_flag, scalex, scaley, angle):
_lib.TCOD_image_blit(image, console, c_float(x), c_float(y), bkgnd_flag,
c_float(scalex), c_float(scaley), c_float(angle))
def image_blit_rect(image, console, x, y, w, h, bkgnd_flag):
_lib.TCOD_image_blit_rect(image, console, x, y, w, h, bkgnd_flag)
def image_blit_2x(image, console, dx, dy, sx=0, sy=0, w=-1, h=-1):
_lib.TCOD_image_blit_2x(image, console, dx,dy,sx,sy,w,h)
def image_save(image, filename):
_lib.TCOD_image_save(image, c_char_p(filename))
def image_delete(image):
_lib.TCOD_image_delete(image)
############################
# mouse module
############################
class Mouse(Structure):
_fields_=[('x', c_int),
('y', c_int),
('dx', c_int),
('dy', c_int),
('cx', c_int),
('cy', c_int),
('dcx', c_int),
('dcy', c_int),
('lbutton', c_bool),
('rbutton', c_bool),
('mbutton', c_bool),
('lbutton_pressed', c_bool),
('rbutton_pressed', c_bool),
('mbutton_pressed', c_bool),
('wheel_up', c_bool),
('wheel_down', c_bool),
]
_lib.TCOD_mouse_is_cursor_visible.restype = c_bool
def mouse_show_cursor(visible):
_lib.TCOD_mouse_show_cursor(c_int(visible))
def mouse_is_cursor_visible():
return _lib.TCOD_mouse_is_cursor_visible()
def mouse_move(x, y):
_lib.TCOD_mouse_move(x, y)
def mouse_get_status():
mouse=Mouse()
_lib.TCOD_mouse_get_status_wrapper(byref(mouse))
return mouse
############################
# parser module
############################
_lib.TCOD_struct_get_name.restype = c_char_p
_lib.TCOD_struct_is_mandatory.restype = c_bool
_lib.TCOD_parser_has_property.restype = c_bool
_lib.TCOD_parser_get_bool_property.restype = c_bool
_lib.TCOD_parser_get_float_property.restype = c_float
_lib.TCOD_parser_get_string_property.restype = c_char_p
_lib.TCOD_parser_get_color_property.restype = Color
class Dice(Structure):
_fields_=[('nb_dices', c_int),
('nb_faces', c_int),
('multiplier', c_float),
('addsub', c_float),
]
def __repr__(self):
return "Dice(%d, %d, %s, %s)" % (self.nb_dices, self.nb_faces,
self.multiplier, self.addsub)
class _CValue(Union):
_fields_=[('c',c_uint8),
('i',c_int),
('f',c_float),
('s',c_char_p),
# JBR03192012 See http://bugs.python.org/issue14354 for why these are not defined as their actual types
('col',c_uint8 * 3),
('dice',c_int * 4),
('custom',c_void_p),
]
_CFUNC_NEW_STRUCT = CFUNCTYPE(c_uint, c_void_p, c_char_p)
_CFUNC_NEW_FLAG = CFUNCTYPE(c_uint, c_char_p)
_CFUNC_NEW_PROPERTY = CFUNCTYPE(c_uint, c_char_p, c_int, _CValue)
class _CParserListener(Structure):
_fields_=[('new_struct', _CFUNC_NEW_STRUCT),
('new_flag',_CFUNC_NEW_FLAG),
('new_property',_CFUNC_NEW_PROPERTY),
('end_struct',_CFUNC_NEW_STRUCT),
('error',_CFUNC_NEW_FLAG),
]
# property types
TYPE_NONE = 0
TYPE_BOOL = 1
TYPE_CHAR = 2
TYPE_INT = 3
TYPE_FLOAT = 4
TYPE_STRING = 5
TYPE_COLOR = 6
TYPE_DICE = 7
TYPE_VALUELIST00 = 8
TYPE_VALUELIST01 = 9
TYPE_VALUELIST02 = 10
TYPE_VALUELIST03 = 11
TYPE_VALUELIST04 = 12
TYPE_VALUELIST05 = 13
TYPE_VALUELIST06 = 14
TYPE_VALUELIST07 = 15
TYPE_VALUELIST08 = 16
TYPE_VALUELIST09 = 17
TYPE_VALUELIST10 = 18
TYPE_VALUELIST11 = 19
TYPE_VALUELIST12 = 20
TYPE_VALUELIST13 = 21
TYPE_VALUELIST14 = 22
TYPE_VALUELIST15 = 23
TYPE_LIST = 1024
def _convert_TCODList(clist, typ):
res = list()
for i in range(_lib.TCOD_list_size(clist)):
elt = _lib.TCOD_list_get(clist, i)
elt = cast(elt, c_void_p)
if typ == TYPE_BOOL:
elt = c_bool.from_buffer(elt).value
elif typ == TYPE_CHAR:
elt = c_char.from_buffer(elt).value
elif typ == TYPE_INT:
elt = c_int.from_buffer(elt).value
elif typ == TYPE_FLOAT:
elt = c_float.from_buffer(elt).value
elif typ == TYPE_STRING or TYPE_VALUELIST15 >= typ >= TYPE_VALUELIST00:
elt = cast(elt, c_char_p).value
elif typ == TYPE_COLOR:
elt = Color.from_buffer_copy(elt)
elif typ == TYPE_DICE:
# doesn't work
elt = Dice.from_buffer_copy(elt)
res.append(elt)
return res
def parser_new():
return _lib.TCOD_parser_new()
def parser_new_struct(parser, name):
return _lib.TCOD_parser_new_struct(parser, name)
def struct_add_flag(struct, name):
_lib.TCOD_struct_add_flag(struct, name)
def struct_add_property(struct, name, typ, mandatory):
_lib.TCOD_struct_add_property(struct, name, typ, c_bool(mandatory))
def struct_add_value_list(struct, name, value_list, mandatory):
CARRAY = c_char_p * (len(value_list) + 1)
cvalue_list = CARRAY()
for i in range(len(value_list)):
cvalue_list[i] = cast(value_list[i], c_char_p)
cvalue_list[len(value_list)] = 0
_lib.TCOD_struct_add_value_list(struct, name, cvalue_list, c_bool(mandatory))
def struct_add_list_property(struct, name, typ, mandatory):
_lib.TCOD_struct_add_list_property(struct, name, typ, c_bool(mandatory))
def struct_add_structure(struct, sub_struct):
_lib.TCOD_struct_add_structure(struct, sub_struct)
def struct_get_name(struct):
return _lib.TCOD_struct_get_name(struct)
def struct_is_mandatory(struct, name):
return _lib.TCOD_struct_is_mandatory(struct, name)
def struct_get_type(struct, name):
return _lib.TCOD_struct_get_type(struct, name)
def parser_run(parser, filename, listener=0):
if listener != 0:
clistener=_CParserListener()
def value_converter(name, typ, value):
if typ == TYPE_BOOL:
return listener.new_property(name, typ, value.c == 1)
elif typ == TYPE_CHAR:
return listener.new_property(name, typ, '%c' % (value.c & 0xFF))
elif typ == TYPE_INT:
return listener.new_property(name, typ, value.i)
elif typ == TYPE_FLOAT:
return listener.new_property(name, typ, value.f)
elif typ == TYPE_STRING or \
TYPE_VALUELIST15 >= typ >= TYPE_VALUELIST00:
return listener.new_property(name, typ, value.s)
elif typ == TYPE_COLOR:
col = cast(value.col, POINTER(Color)).contents
return listener.new_property(name, typ, col)
elif typ == TYPE_DICE:
dice = cast(value.dice, POINTER(Dice)).contents
return listener.new_property(name, typ, dice)
elif typ & TYPE_LIST:
return listener.new_property(name, typ,
_convert_TCODList(value.custom, typ & 0xFF))
return True
clistener.new_struct = _CFUNC_NEW_STRUCT(listener.new_struct)
clistener.new_flag = _CFUNC_NEW_FLAG(listener.new_flag)
clistener.new_property = _CFUNC_NEW_PROPERTY(value_converter)
clistener.end_struct = _CFUNC_NEW_STRUCT(listener.end_struct)
clistener.error = _CFUNC_NEW_FLAG(listener.error)
_lib.TCOD_parser_run(parser, c_char_p(filename), byref(clistener))
else:
_lib.TCOD_parser_run(parser, c_char_p(filename), 0)
def parser_delete(parser):
_lib.TCOD_parser_delete(parser)
def parser_has_property(parser, name):
return _lib.TCOD_parser_has_property(parser, c_char_p(name))
def parser_get_bool_property(parser, name):
return _lib.TCOD_parser_get_bool_property(parser, c_char_p(name))
def parser_get_int_property(parser, name):
return _lib.TCOD_parser_get_int_property(parser, c_char_p(name))
def parser_get_char_property(parser, name):
return '%c' % _lib.TCOD_parser_get_char_property(parser, c_char_p(name))
def parser_get_float_property(parser, name):
return _lib.TCOD_parser_get_float_property(parser, c_char_p(name))
def parser_get_string_property(parser, name):
return _lib.TCOD_parser_get_string_property(parser, c_char_p(name))
def parser_get_color_property(parser, name):
return _lib.TCOD_parser_get_color_property(parser, c_char_p(name))
def parser_get_dice_property(parser, name):
d = Dice()
_lib.TCOD_parser_get_dice_property_py(c_void_p(parser), c_char_p(name), byref(d))
return d
def parser_get_list_property(parser, name, typ):
clist = _lib.TCOD_parser_get_list_property(parser, c_char_p(name), c_int(typ))
return _convert_TCODList(clist, typ)
############################
# random module
############################
_lib.TCOD_random_get_float.restype = c_float
_lib.TCOD_random_get_double.restype = c_double
RNG_MT = 0
RNG_CMWC = 1
DISTRIBUTION_LINEAR = 0
DISTRIBUTION_GAUSSIAN = 1
DISTRIBUTION_GAUSSIAN_RANGE = 2
DISTRIBUTION_GAUSSIAN_INVERSE = 3
DISTRIBUTION_GAUSSIAN_RANGE_INVERSE = 4
def random_get_instance():
return _lib.TCOD_random_get_instance()
def random_new(algo=RNG_CMWC):
return _lib.TCOD_random_new(algo)
def random_new_from_seed(seed, algo=RNG_CMWC):
return _lib.TCOD_random_new_from_seed(algo,c_uint(seed))
def random_set_distribution(rnd, dist) :
_lib.TCOD_random_set_distribution(rnd, dist)
def random_get_int(rnd, mi, ma):
return _lib.TCOD_random_get_int(rnd, mi, ma)
def random_get_float(rnd, mi, ma):
return _lib.TCOD_random_get_float(rnd, c_float(mi), c_float(ma))
def random_get_double(rnd, mi, ma):
return _lib.TCOD_random_get_double(rnd, c_double(mi), c_double(ma))
def random_get_int_mean(rnd, mi, ma, mean):
return _lib.TCOD_random_get_int_mean(rnd, mi, ma, mean)
def random_get_float_mean(rnd, mi, ma, mean):
return _lib.TCOD_random_get_float_mean(rnd, c_float(mi), c_float(ma), c_float(mean))
def random_get_double_mean(rnd, mi, ma, mean):
return _lib.TCOD_random_get_double_mean(rnd, c_double(mi), c_double(ma), c_double(mean))
def random_save(rnd):
return _lib.TCOD_random_save(rnd)
def random_restore(rnd, backup):
_lib.TCOD_random_restore(rnd, backup)
def random_delete(rnd):
_lib.TCOD_random_delete(rnd)
############################
# noise module
############################
_lib.TCOD_noise_get.restype = c_float
_lib.TCOD_noise_get_ex.restype = c_float
_lib.TCOD_noise_get_fbm.restype = c_float
_lib.TCOD_noise_get_fbm_ex.restype = c_float
_lib.TCOD_noise_get_turbulence.restype = c_float
_lib.TCOD_noise_get_turbulence_ex.restype = c_float
NOISE_DEFAULT_HURST = 0.5
NOISE_DEFAULT_LACUNARITY = 2.0
NOISE_DEFAULT = 0
NOISE_PERLIN = 1
NOISE_SIMPLEX = 2
NOISE_WAVELET = 4
_NOISE_PACKER_FUNC = (None,
(c_float * 1),
(c_float * 2),
(c_float * 3),
(c_float * 4),
)
def noise_new(dim, h=NOISE_DEFAULT_HURST, l=NOISE_DEFAULT_LACUNARITY, random=0):
return _lib.TCOD_noise_new(dim, c_float(h), c_float(l), random)
def noise_set_type(n, typ) :
_lib.TCOD_noise_set_type(n,typ)
def noise_get(n, f, typ=NOISE_DEFAULT):
return _lib.TCOD_noise_get_ex(n, _NOISE_PACKER_FUNC[len(f)](*f), typ)
def noise_get_fbm(n, f, oc, typ=NOISE_DEFAULT):
return _lib.TCOD_noise_get_fbm_ex(n, _NOISE_PACKER_FUNC[len(f)](*f), c_float(oc), typ)
def noise_get_turbulence(n, f, oc, typ=NOISE_DEFAULT):
return _lib.TCOD_noise_get_turbulence_ex(n, _NOISE_PACKER_FUNC[len(f)](*f), c_float(oc), typ)
def noise_delete(n):
_lib.TCOD_noise_delete(n)
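# Illustrative sketch (not part of the original bindings): sample 2D simplex
# fBm noise over a small grid. The length of the coordinate list selects the
# packer from _NOISE_PACKER_FUNC, so a 2D generator takes 2-element inputs.
def _example_noise_grid(w=4, h=4):
    n = noise_new(2)
    noise_set_type(n, NOISE_SIMPLEX)
    values = [[noise_get_fbm(n, [x * 0.1, y * 0.1], 4.0) for x in range(w)]
              for y in range(h)]
    noise_delete(n)
    return values  # floats roughly in [-1.0, 1.0]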
############################
# fov module
############################
_lib.TCOD_map_is_in_fov.restype = c_bool
_lib.TCOD_map_is_transparent.restype = c_bool
_lib.TCOD_map_is_walkable.restype = c_bool
FOV_BASIC = 0
FOV_DIAMOND = 1
FOV_SHADOW = 2
FOV_PERMISSIVE_0 = 3
FOV_PERMISSIVE_1 = 4
FOV_PERMISSIVE_2 = 5
FOV_PERMISSIVE_3 = 6
FOV_PERMISSIVE_4 = 7
FOV_PERMISSIVE_5 = 8
FOV_PERMISSIVE_6 = 9
FOV_PERMISSIVE_7 = 10
FOV_PERMISSIVE_8 = 11
FOV_RESTRICTIVE = 12
NB_FOV_ALGORITHMS = 13
def FOV_PERMISSIVE(p) :
return FOV_PERMISSIVE_0+p
def map_new(w, h):
return _lib.TCOD_map_new(w, h)
def map_copy(source, dest):
return _lib.TCOD_map_copy(source, dest)
def map_set_properties(m, x, y, isTrans, isWalk):
_lib.TCOD_map_set_properties(m, x, y, c_int(isTrans), c_int(isWalk))
def map_clear(m,walkable=False,transparent=False):
_lib.TCOD_map_clear(m,c_int(walkable),c_int(transparent))
def map_compute_fov(m, x, y, radius=0, light_walls=True, algo=FOV_RESTRICTIVE ):
_lib.TCOD_map_compute_fov(m, x, y, c_int(radius), c_bool(light_walls), c_int(algo))
def map_is_in_fov(m, x, y):
return _lib.TCOD_map_is_in_fov(m, x, y)
def map_is_transparent(m, x, y):
return _lib.TCOD_map_is_transparent(m, x, y)
def map_is_walkable(m, x, y):
return _lib.TCOD_map_is_walkable(m, x, y)
def map_delete(m):
return _lib.TCOD_map_delete(m)
def map_get_width(map):
return _lib.TCOD_map_get_width(map)
def map_get_height(map):
return _lib.TCOD_map_get_height(map)
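# Illustrative sketch (not part of the original bindings): build a tiny map
# with one opaque cell and compute field of view from (1, 1) using the
# shadow-casting algorithm.
def _example_fov():
    m = map_new(10, 10)
    map_clear(m, walkable=True, transparent=True)
    map_set_properties(m, 5, 5, False, False)  # opaque and unwalkable wall
    map_compute_fov(m, 1, 1, radius=8, light_walls=True, algo=FOV_SHADOW)
    visible = map_is_in_fov(m, 9, 9)
    map_delete(m)
    return visible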
############################
# pathfinding module
############################
_lib.TCOD_path_compute.restype = c_bool
_lib.TCOD_path_is_empty.restype = c_bool
_lib.TCOD_path_walk.restype = c_bool
PATH_CBK_FUNC = CFUNCTYPE(c_float, c_int, c_int, c_int, c_int, py_object)
def path_new_using_map(m, dcost=1.41):
return (_lib.TCOD_path_new_using_map(c_void_p(m), c_float(dcost)), None)
def path_new_using_function(w, h, func, userdata=0, dcost=1.41):
cbk_func = PATH_CBK_FUNC(func)
return (_lib.TCOD_path_new_using_function(w, h, cbk_func,
py_object(userdata), c_float(dcost)), cbk_func)
def path_compute(p, ox, oy, dx, dy):
return _lib.TCOD_path_compute(p[0], ox, oy, dx, dy)
def path_get_origin(p):
x = c_int()
y = c_int()
_lib.TCOD_path_get_origin(p[0], byref(x), byref(y))
return x.value, y.value
def path_get_destination(p):
x = c_int()
y = c_int()
_lib.TCOD_path_get_destination(p[0], byref(x), byref(y))
return x.value, y.value
def path_size(p):
return _lib.TCOD_path_size(p[0])
def path_reverse(p):
_lib.TCOD_path_reverse(p[0])
def path_get(p, idx):
x = c_int()
y = c_int()
_lib.TCOD_path_get(p[0], idx, byref(x), byref(y))
return x.value, y.value
def path_is_empty(p):
return _lib.TCOD_path_is_empty(p[0])
def path_walk(p, recompute):
x = c_int()
y = c_int()
if _lib.TCOD_path_walk(p[0], byref(x), byref(y), c_int(recompute)):
return x.value, y.value
return None,None
def path_delete(p):
_lib.TCOD_path_delete(p[0])
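# Illustrative sketch (not part of the original bindings): A* pathfinding over
# a map handle such as the one built in _example_fov above. Note that path
# handles here are (pointer, callback) tuples, so they must be passed intact.
def _example_astar(m):
    p = path_new_using_map(m, dcost=1.41)
    if path_compute(p, 0, 0, 9, 9):
        while not path_is_empty(p):
            x, y = path_walk(p, True)
            if x is None:
                break
            print('step to (%d, %d)' % (x, y))
    path_delete(p)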
_lib.TCOD_dijkstra_path_set.restype = c_bool
_lib.TCOD_dijkstra_is_empty.restype = c_bool
_lib.TCOD_dijkstra_path_walk.restype = c_bool
_lib.TCOD_dijkstra_get_distance.restype = c_float
def dijkstra_new(m, dcost=1.41):
return (_lib.TCOD_dijkstra_new(c_void_p(m), c_float(dcost)), None)
def dijkstra_new_using_function(w, h, func, userdata=0, dcost=1.41):
cbk_func = PATH_CBK_FUNC(func)
    return (_lib.TCOD_dijkstra_new_using_function(w, h, cbk_func,
                                py_object(userdata), c_float(dcost)), cbk_func)
def dijkstra_compute(p, ox, oy):
_lib.TCOD_dijkstra_compute(p[0], c_int(ox), c_int(oy))
def dijkstra_path_set(p, x, y):
return _lib.TCOD_dijkstra_path_set(p[0], c_int(x), c_int(y))
def dijkstra_get_distance(p, x, y):
return _lib.TCOD_dijkstra_get_distance(p[0], c_int(x), c_int(y))
def dijkstra_size(p):
return _lib.TCOD_dijkstra_size(p[0])
def dijkstra_reverse(p):
_lib.TCOD_dijkstra_reverse(p[0])
def dijkstra_get(p, idx):
x = c_int()
y = c_int()
_lib.TCOD_dijkstra_get(p[0], c_int(idx), byref(x), byref(y))
return x.value, y.value
def dijkstra_is_empty(p):
return _lib.TCOD_dijkstra_is_empty(p[0])
def dijkstra_path_walk(p):
x = c_int()
y = c_int()
if _lib.TCOD_dijkstra_path_walk(p[0], byref(x), byref(y)):
return x.value, y.value
return None,None
def dijkstra_delete(p):
_lib.TCOD_dijkstra_delete(p[0])
############################
# bsp module
############################
class _CBsp(Structure):
_fields_ = [('next', c_void_p),
('father', c_void_p),
('son', c_void_p),
('x', c_int),
('y', c_int),
('w', c_int),
('h', c_int),
('position', c_int),
('level', c_uint8),
('horizontal', c_bool),
]
_lib.TCOD_bsp_new_with_size.restype = POINTER(_CBsp)
_lib.TCOD_bsp_left.restype = POINTER(_CBsp)
_lib.TCOD_bsp_right.restype = POINTER(_CBsp)
_lib.TCOD_bsp_father.restype = POINTER(_CBsp)
_lib.TCOD_bsp_is_leaf.restype = c_bool
_lib.TCOD_bsp_contains.restype = c_bool
_lib.TCOD_bsp_find_node.restype = POINTER(_CBsp)
BSP_CBK_FUNC = CFUNCTYPE(c_int, c_void_p, c_void_p)
# python class encapsulating the _CBsp pointer
class Bsp(object):
def __init__(self, cnode):
pcbsp = cast(cnode, POINTER(_CBsp))
self.p = pcbsp
def getx(self):
return self.p.contents.x
def setx(self, value):
self.p.contents.x = value
x = property(getx, setx)
def gety(self):
return self.p.contents.y
def sety(self, value):
self.p.contents.y = value
y = property(gety, sety)
def getw(self):
return self.p.contents.w
def setw(self, value):
self.p.contents.w = value
w = property(getw, setw)
def geth(self):
return self.p.contents.h
def seth(self, value):
self.p.contents.h = value
h = property(geth, seth)
def getpos(self):
return self.p.contents.position
def setpos(self, value):
self.p.contents.position = value
position = property(getpos, setpos)
def gethor(self):
return self.p.contents.horizontal
def sethor(self,value):
self.p.contents.horizontal = value
horizontal = property(gethor, sethor)
def getlev(self):
return self.p.contents.level
def setlev(self,value):
self.p.contents.level = value
level = property(getlev, setlev)
def bsp_new_with_size(x, y, w, h):
return Bsp(_lib.TCOD_bsp_new_with_size(x, y, w, h))
def bsp_split_once(node, horizontal, position):
_lib.TCOD_bsp_split_once(node.p, c_int(horizontal), position)
def bsp_split_recursive(node, randomizer, nb, minHSize, minVSize, maxHRatio,
maxVRatio):
_lib.TCOD_bsp_split_recursive(node.p, randomizer, nb, minHSize, minVSize,
c_float(maxHRatio), c_float(maxVRatio))
def bsp_resize(node, x, y, w, h):
_lib.TCOD_bsp_resize(node.p, x, y, w, h)
def bsp_left(node):
return Bsp(_lib.TCOD_bsp_left(node.p))
def bsp_right(node):
return Bsp(_lib.TCOD_bsp_right(node.p))
def bsp_father(node):
return Bsp(_lib.TCOD_bsp_father(node.p))
def bsp_is_leaf(node):
return _lib.TCOD_bsp_is_leaf(node.p)
def bsp_contains(node, cx, cy):
return _lib.TCOD_bsp_contains(node.p, cx, cy)
def bsp_find_node(node, cx, cy):
return Bsp(_lib.TCOD_bsp_find_node(node.p, cx, cy))
def _bsp_traverse(node, callback, userData, func):
# convert the c node into a python node
#before passing it to the actual callback
def node_converter(cnode, data):
node = Bsp(cnode)
return callback(node, data)
cbk_func = BSP_CBK_FUNC(node_converter)
func(node.p, cbk_func, userData)
def bsp_traverse_pre_order(node, callback, userData=0):
_bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_pre_order)
def bsp_traverse_in_order(node, callback, userData=0):
_bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_in_order)
def bsp_traverse_post_order(node, callback, userData=0):
_bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_post_order)
def bsp_traverse_level_order(node, callback, userData=0):
_bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_level_order)
def bsp_traverse_inverted_level_order(node, callback, userData=0):
_bsp_traverse(node, callback, userData,
_lib.TCOD_bsp_traverse_inverted_level_order)
def bsp_remove_sons(node):
_lib.TCOD_bsp_remove_sons(node.p)
def bsp_delete(node):
_lib.TCOD_bsp_delete(node.p)
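# Illustrative sketch (not part of the original bindings): split a region into
# a BSP tree and report its leaves. The traversal helpers wrap each C node in
# a Bsp object before invoking the Python callback; randomizer=0 selects the
# default RNG.
def _example_bsp_leaves():
    root = bsp_new_with_size(0, 0, 80, 50)
    bsp_split_recursive(root, 0, 4, 8, 8, 1.5, 1.5)
    def report(node, userdata):
        if bsp_is_leaf(node):
            print('leaf at (%d, %d), size %dx%d' % (node.x, node.y, node.w, node.h))
        return True  # keep traversing
    bsp_traverse_post_order(root, report)
    bsp_delete(root)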
############################
# heightmap module
############################
class _CHeightMap(Structure):
_fields_=[('w', c_int),
('h', c_int),
('values', POINTER(c_float)),
]
_lib.TCOD_heightmap_new.restype = POINTER(_CHeightMap)
_lib.TCOD_heightmap_get_value.restype = c_float
_lib.TCOD_heightmap_has_land_on_border.restype = c_bool
class HeightMap(object):
def __init__(self, chm):
pchm = cast(chm, POINTER(_CHeightMap))
self.p = pchm
def getw(self):
return self.p.contents.w
def setw(self, value):
self.p.contents.w = value
w = property(getw, setw)
def geth(self):
return self.p.contents.h
def seth(self, value):
self.p.contents.h = value
h = property(geth, seth)
def heightmap_new(w, h):
phm = _lib.TCOD_heightmap_new(w, h)
return HeightMap(phm)
def heightmap_set_value(hm, x, y, value):
_lib.TCOD_heightmap_set_value(hm.p, x, y, c_float(value))
def heightmap_add(hm, value):
_lib.TCOD_heightmap_add(hm.p, c_float(value))
def heightmap_scale(hm, value):
_lib.TCOD_heightmap_scale(hm.p, c_float(value))
def heightmap_clear(hm):
_lib.TCOD_heightmap_clear(hm.p)
def heightmap_clamp(hm, mi, ma):
_lib.TCOD_heightmap_clamp(hm.p, c_float(mi),c_float(ma))
def heightmap_copy(hm1, hm2):
_lib.TCOD_heightmap_copy(hm1.p, hm2.p)
def heightmap_normalize(hm, mi=0.0, ma=1.0):
_lib.TCOD_heightmap_normalize(hm.p, c_float(mi), c_float(ma))
def heightmap_lerp_hm(hm1, hm2, hm3, coef):
_lib.TCOD_heightmap_lerp_hm(hm1.p, hm2.p, hm3.p, c_float(coef))
def heightmap_add_hm(hm1, hm2, hm3):
_lib.TCOD_heightmap_add_hm(hm1.p, hm2.p, hm3.p)
def heightmap_multiply_hm(hm1, hm2, hm3):
_lib.TCOD_heightmap_multiply_hm(hm1.p, hm2.p, hm3.p)
def heightmap_add_hill(hm, x, y, radius, height):
_lib.TCOD_heightmap_add_hill(hm.p, c_float( x), c_float( y),
c_float( radius), c_float( height))
def heightmap_dig_hill(hm, x, y, radius, height):
_lib.TCOD_heightmap_dig_hill(hm.p, c_float( x), c_float( y),
c_float( radius), c_float( height))
def heightmap_mid_point_displacement(hm, rng, roughness):
_lib.TCOD_heightmap_mid_point_displacement(hm.p, rng, c_float(roughness))
def heightmap_rain_erosion(hm, nbDrops, erosionCoef, sedimentationCoef, rnd=0):
_lib.TCOD_heightmap_rain_erosion(hm.p, nbDrops, c_float( erosionCoef),
c_float( sedimentationCoef), rnd)
def heightmap_kernel_transform(hm, kernelsize, dx, dy, weight, minLevel,
maxLevel):
FARRAY = c_float * kernelsize
IARRAY = c_int * kernelsize
cdx = IARRAY(*dx)
cdy = IARRAY(*dy)
cweight = FARRAY(*weight)
_lib.TCOD_heightmap_kernel_transform(hm.p, kernelsize, cdx, cdy, cweight,
c_float(minLevel), c_float(maxLevel))
def heightmap_add_voronoi(hm, nbPoints, nbCoef, coef, rnd=0):
FARRAY = c_float * nbCoef
ccoef = FARRAY(*coef)
_lib.TCOD_heightmap_add_voronoi(hm.p, nbPoints, nbCoef, ccoef, rnd)
def heightmap_add_fbm(hm, noise, mulx, muly, addx, addy, octaves, delta, scale):
_lib.TCOD_heightmap_add_fbm(hm.p, noise, c_float(mulx), c_float(muly),
c_float(addx), c_float(addy),
c_float(octaves), c_float(delta),
c_float(scale))
def heightmap_scale_fbm(hm, noise, mulx, muly, addx, addy, octaves, delta,
scale):
_lib.TCOD_heightmap_scale_fbm(hm.p, noise, c_float(mulx), c_float(muly),
c_float(addx), c_float(addy),
c_float(octaves), c_float(delta),
c_float(scale))
def heightmap_dig_bezier(hm, px, py, startRadius, startDepth, endRadius,
endDepth):
IARRAY = c_int * 4
cpx = IARRAY(*px)
cpy = IARRAY(*py)
_lib.TCOD_heightmap_dig_bezier(hm.p, cpx, cpy, c_float(startRadius),
c_float(startDepth), c_float(endRadius),
c_float(endDepth))
def heightmap_get_value(hm, x, y):
return _lib.TCOD_heightmap_get_value(hm.p, x, y)
def heightmap_get_interpolated_value(hm, x, y):
return _lib.TCOD_heightmap_get_interpolated_value(hm.p, c_float(x),
c_float(y))
def heightmap_get_slope(hm, x, y):
return _lib.TCOD_heightmap_get_slope(hm.p, x, y)
def heightmap_get_normal(hm, x, y, waterLevel):
FARRAY = c_float * 3
cn = FARRAY()
_lib.TCOD_heightmap_get_normal(hm.p, c_float(x), c_float(y), cn,
c_float(waterLevel))
return cn[0], cn[1], cn[2]
def heightmap_count_cells(hm, mi, ma):
return _lib.TCOD_heightmap_count_cells(hm.p, c_float(mi), c_float(ma))
def heightmap_has_land_on_border(hm, waterlevel):
return _lib.TCOD_heightmap_has_land_on_border(hm.p, c_float(waterlevel))
def heightmap_get_minmax(hm):
mi = c_float()
ma = c_float()
_lib.TCOD_heightmap_get_minmax(hm.p, byref(mi), byref(ma))
return mi.value, ma.value
def heightmap_delete(hm):
_lib.TCOD_heightmap_delete(hm.p)
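# Illustrative sketch (not part of the original bindings): build a heightmap,
# add fBm noise from a 2D generator, normalize to [0, 1], and read back the
# extrema.
def _example_heightmap():
    hm = heightmap_new(64, 64)
    n = noise_new(2)
    heightmap_add_fbm(hm, n, 2.0, 2.0, 0.0, 0.0, 6.0, 0.0, 1.0)
    heightmap_normalize(hm, 0.0, 1.0)
    mi, ma = heightmap_get_minmax(hm)  # approximately (0.0, 1.0) here
    noise_delete(n)
    heightmap_delete(hm)
    return mi, ma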
############################
# name generator module
############################
_lib.TCOD_namegen_generate.restype = c_char_p
_lib.TCOD_namegen_generate_custom.restype = c_char_p
def namegen_parse(filename,random=0) :
_lib.TCOD_namegen_parse(filename,random)
def namegen_generate(name) :
return _lib.TCOD_namegen_generate(name, 0)
def namegen_generate_custom(name, rule) :
    return _lib.TCOD_namegen_generate_custom(name, rule, 0)
def namegen_get_sets():
nb=_lib.TCOD_namegen_get_nb_sets_wrapper()
    SARRAY = c_char_p * nb
setsa = SARRAY()
_lib.TCOD_namegen_get_sets_wrapper(setsa)
return list(setsa)
def namegen_destroy() :
_lib.TCOD_namegen_destroy()
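# Illustrative sketch (not part of the original bindings): generate one name
# from every set in a libtcod name-config file. The path below is
# hypothetical; any namegen *.cfg shipped with libtcod works.
def _example_namegen(cfg_path='data/namegen/mingos_town.cfg'):
    namegen_parse(cfg_path)
    for nset in namegen_get_sets():
        print('%s: %s' % (nset, namegen_generate(nset)))
    namegen_destroy()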
| v4nz666/MineClimbeR-L- | RoguePy/libtcod/libtcodpy.py | Python | mit | 61,359 | ["Amber"] | bbaf11ee6415761d7ec0a707a08f99db2489c6f1a241f274191236c21c70a6bc |
import copy
import numpy as np
import pytest
import qcelemental as qcel
from qcelemental.testing import compare, compare_recursive, compare_values, tnm
import qcengine as qcng
from qcengine.programs import empirical_dispersion_resources
from qcengine.testing import is_program_new_enough, using
from .addons import using
from qcengine.programs.tests import test_dftd3_mp2d
ref, gref = test_dftd3_mp2d.ref, test_dftd3_mp2d.gref
pytestmark = [pytest.mark.quick]
@using("dftd3")
@pytest.mark.parametrize("method", [
"b3lyp-d3",
"b3lyp-d3m",
"b3lyp-d3bj",
"b3lyp-d3mbj",
])
def test_dftd3_task(method):
json_data = {"molecule": qcng.get_molecule("eneyne"), "driver": "energy", "model": {"method": method}}
ret = qcng.compute(json_data, "dftd3", raise_error=True, return_dict=True)
assert ret["driver"] == "energy"
assert "provenance" in ret
assert "normal termination of dftd3" in ret["stdout"]
for key in ["cpu", "hostname", "username", "wall_time"]:
assert key in ret["provenance"]
assert ret["success"] is True
seneyne = """
C 0.000000 -0.667578 -2.124659
C 0.000000 0.667578 -2.124659
H 0.923621 -1.232253 -2.126185
H -0.923621 -1.232253 -2.126185
H -0.923621 1.232253 -2.126185
H 0.923621 1.232253 -2.126185
--
C 0.000000 0.000000 2.900503
C 0.000000 0.000000 1.693240
H 0.000000 0.000000 0.627352
H 0.000000 0.000000 3.963929
"""
sne = """
Ne 0 0 0
"""
def eneyne_ne_qcdbmols():
    if not is_program_new_enough("psi4", "1.4a1.dev55"):
        pytest.skip("Psi4 requires at least Psi4 v1.4a1.dev55")
from psi4.driver import qcdb
eneyne = qcdb.Molecule(seneyne)
ne = qcdb.Molecule(sne)
mols = {
'eneyne': {
'dimer': eneyne,
'mA': eneyne.extract_subsets(1),
'mB': eneyne.extract_subsets(2),
'mAgB': eneyne.extract_subsets(1, 2),
'gAmB': eneyne.extract_subsets(2, 1),
},
'ne': {
'atom': ne,
}
}
return mols
def eneyne_ne_psi4mols():
    if not is_program_new_enough("psi4", "1.4a1.dev55"):
        pytest.skip("Psi4 requires at least Psi4 v1.4a1.dev55")
import psi4
eneyne = psi4.core.Molecule.from_string(seneyne)
ne = psi4.core.Molecule.from_string(sne)
mols = {
'eneyne': {
'dimer': eneyne,
'mA': eneyne.extract_subsets(1),
'mB': eneyne.extract_subsets(2),
'mAgB': eneyne.extract_subsets(1, 2),
'gAmB': eneyne.extract_subsets(2, 1),
},
'ne': {
'atom': ne,
}
}
return mols
def eneyne_ne_qcschemamols():
eneyne = qcel.molparse.to_schema(qcel.molparse.from_string(seneyne)['qm'], dtype=2)
mA = qcel.molparse.to_schema(qcel.molparse.from_string('\n'.join(seneyne.splitlines()[:7]))['qm'], dtype=2)
mB = qcel.molparse.to_schema(qcel.molparse.from_string('\n'.join(seneyne.splitlines()[-4:]))['qm'], dtype=2)
ne = qcel.molparse.to_schema(qcel.molparse.from_string(sne)['qm'], dtype=2)
mAgB = qcel.molparse.from_string(seneyne)['qm']
mAgB['real'] = [(iat < mAgB['fragment_separators'][0])
for iat in range(len(mAgB['elem']))] # works b/c chgmult doesn't need refiguring
mAgB = qcel.molparse.to_schema(mAgB, dtype=2)
gAmB = qcel.molparse.from_string(seneyne)['qm']
gAmB['real'] = [(iat >= gAmB['fragment_separators'][0]) for iat in range(len(gAmB['elem']))]
gAmB = qcel.molparse.to_schema(gAmB, dtype=2)
mols = {
'eneyne': {
'dimer': eneyne,
'mA': mA,
'mB': mB,
'mAgB': mAgB,
'gAmB': gAmB,
},
'ne': {
'atom': ne,
}
}
return mols
db3lypd3bj = {
'dashlevel': 'd3bj',
'dashparams': {
's8': 1.9889,
's6': 1.0,
'a2': 4.4211,
'a1': 0.3981
},
'dashparams_citation': '',
'fctldash': 'b3lyp-d3(bj)'
}
db3lypd3bjcustom = copy.deepcopy(db3lypd3bj)
db3lypd3bjcustom['fctldash'] = ''
db3lypd3bjcustom['dashparams']['a2'] = 5.4211
dpbed3zero = {
'dashlevel': 'd3zero',
'dashparams': {
's6': 1.0,
's8': 0.722,
'sr6': 1.217,
'sr8': 1.0,
'alpha6': 14.0
},
'dashparams_citation': '',
'fctldash': 'pbe-d3'
}
atmgr = {
'dashlevel': 'atmgr',
'dashparams': {
'alpha6': 14.0,
},
'dashparams_citation': '',
'fctldash': 'atm(gr)',
}
chg = {
'dashlevel': 'chg',
'dashparams': {
's6': 1.0,
},
'dashparams_citation': '',
'fctldash': 'chg',
}
dmp2dmp2 = {
'dashlevel': 'dmp2',
'dashparams': {
's8': 1.187,
'a1': 0.944,
'a2': 0.480,
'rcut': 0.72,
'w': 0.20,
},
'dashparams_citation': '',
'fctldash': 'mp2-dmp2'
}
def _compute_key(pjrec):
return pjrec['fctldash'].upper()
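# Illustrative sanity check (not from the original tests): the key derived
# from a dispersion record is just its 'fctldash' label, uppercased.
assert _compute_key(db3lypd3bj) == 'B3LYP-D3(BJ)'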
## Tests
@pytest.mark.parametrize("inp,expected", [
(({'name_hint': 'b3lyp', 'level_hint': 'd3bj'}, 'B3LYP-D3(BJ)'), db3lypd3bj),
(({'name_hint': 'b3LYP', 'level_hint': 'D3bj'}, 'B3LYP-D3(BJ)'), db3lypd3bj),
(({'param_tweaks': {'s8': 1.9889, 's6': 1.0, 'a2': 4.4211, 'a1': 0.3981}, 'level_hint': 'd3bj'}, 'B3LYP-D3(BJ)'), db3lypd3bj),
(({'name_hint': 'b3lyp', 'level_hint': 'd3bJ', 'param_tweaks': {'a2': 4.4211}}, 'B3LYP-D3(BJ)'), db3lypd3bj),
(({'verbose': 3, 'name_hint': 'b3lyp', 'level_hint': 'd3bJ', 'param_tweaks': {'a2': 5.4211}}, ''), db3lypd3bjcustom),
(({'name_hint': 'b3lyp-d3bj', 'param_tweaks': {'a2': 4.4211}}, 'B3LYP-D3(BJ)'), db3lypd3bj),
(({'name_hint': 'pbe', 'level_hint': 'd3zero'}, 'PBE-D3'), dpbed3zero),
(({'name_hint': 'pbe', 'level_hint': 'd3'}, 'PBE-D3'), dpbed3zero),
(({'name_hint': 'pbe-d3'}, 'PBE-D3'), dpbed3zero),
(({'name_hint': 'atm(gr)', 'level_hint': 'atmgr'}, 'ATM(GR)'), atmgr),
(({'name_hint': 'atmgr'}, 'ATM(GR)'), atmgr),
(({'name_hint': 'bp86-atmgr'}, 'ATM(GR)'), atmgr),
(({'name_hint': 'asdf-chg'}, 'CHG'), chg),
(({'name_hint': 'mp2-dmp2'}, 'MP2-DMP2'), dmp2dmp2),
(({'name_hint': 'MP2', 'level_hint': 'dmp2'}, 'MP2-DMP2'), dmp2dmp2),
]) # yapf: disable
def test_dftd3__from_arrays(inp, expected):
res = empirical_dispersion_resources.from_arrays(**inp[0])
assert compare_recursive(expected, res, atol=1.e-4)
assert compare(inp[1], _compute_key(res), 'key')
res = empirical_dispersion_resources.from_arrays(name_hint=res['fctldash'], level_hint=res['dashlevel'], param_tweaks=res['dashparams'])
assert compare_recursive(expected, res, tnm() + ' idempotent', atol=1.e-4)
@pytest.mark.parametrize("inp", [
({'name_hint': 'b3lyp', 'level_hint': 'd3bJ', 'param_tweaks': {'a3': 5.4211}}),
({'name_hint': 'fakeb3lyp', 'level_hint': 'd3bJ', 'param_tweaks': {'s6': 5.4211}}),
({'level_hint': 'd3bJ', 'param_tweaks': {'s6': 5.4211}}),
({'name_hint': 'b3lyp-d3bj', 'param_tweaks': {'a2': 4.4211, 'zzz': 0.0}}),
({'name_hint': 'asdf-d4'}),
({'name_hint': 'atm(gr)', 'level_hint': 'chg'}),
]) # yapf:disable
def test_dftd3__from_arrays__error(inp):
with pytest.raises(qcng.exceptions.InputError):
empirical_dispersion_resources.from_arrays(**inp)
def test_dftd3__from_arrays__supplement():
ans = {
'dashlevel': 'chg',
'dashparams': {
's6': 4.05
},
'fctldash': 'asdf-d4',
'dashparams_citation': ' mypaper\n'
}
supp = {'chg': {'definitions': {'asdf-d4': {'params': {'s6': 4.05}, 'citation': ' mypaper\n'}}}}
res = empirical_dispersion_resources.from_arrays(name_hint='asdf-d4', level_hint='chg', dashcoeff_supplement=supp)
assert compare_recursive(ans, res, atol=1.e-4)
with pytest.raises(qcng.exceptions.InputError) as e:
empirical_dispersion_resources.from_arrays(name_hint=res['fctldash'], level_hint=res['dashlevel'], param_tweaks=res['dashparams'])
assert "Can't guess -D correction level" in str(e.value)
res = empirical_dispersion_resources.from_arrays(
name_hint=res['fctldash'],
level_hint=res['dashlevel'],
param_tweaks=res['dashparams'],
dashcoeff_supplement=supp)
assert compare_recursive(ans, res, tnm() + ' idempotent', atol=1.e-4)
@using("dftd3")
def test_3():
sys = qcel.molparse.from_string(seneyne)['qm']
resinp = {
'schema_name': 'qcschema_input',
'schema_version': 1,
'molecule': qcel.molparse.to_schema(sys, dtype=2),
'driver': 'energy',
'model': {
'method': 'b3lyp',
},
'keywords': {
'level_hint': 'd3bj'
},
}
res = qcng.compute(resinp, 'dftd3', raise_error=True)
res = res.dict()
#res = dftd3.run_dftd3_from_arrays(molrec=sys, name_hint='b3lyp', level_hint='d3bj')
assert compare('B3LYP-D3(BJ)', _compute_key(res['extras']['local_keywords']), 'key')
@using("dftd3")
@pytest.mark.parametrize(
"subjects",
[
pytest.param(eneyne_ne_psi4mols, marks=using("psi4")),
pytest.param(eneyne_ne_qcdbmols,
marks=using("psi4")), # needs qcdb.Molecule, presently more common in psi4 than in qcdb
],
ids=['qmol', 'pmol'])
@pytest.mark.parametrize(
"inp", [
({'first': 'b3lyp', 'second': 'd', 'parent': 'eneyne', 'subject': 'dimer', 'lbl': 'B3LYP-D2'}),
({'first': 'b3lyp', 'second': 'd3bj', 'parent': 'eneyne', 'subject': 'mA', 'lbl': 'B3LYP-D3(BJ)'}),
({'first': 'pbe', 'second': 'd3zero', 'parent': 'eneyne', 'subject': 'mB', 'lbl': 'PBE-D3'}),
({'first': 'pbe', 'second': 'd3zero', 'parent': 'eneyne', 'subject': 'gAmB', 'lbl': 'PBE-D3'}),
({'first': 'pbe', 'second': 'd2', 'parent': 'eneyne', 'subject': 'mAgB', 'lbl': 'PBE-D2'}),
({'first': 'b3lyp', 'second': 'd3bj', 'parent': 'ne', 'subject': 'atom', 'lbl': 'B3LYP-D3(BJ)'}),
#({'first': '', 'second': 'atmgr', 'parent': 'eneyne', 'subject': 'dimer', 'lbl': 'ATM'}),
#({'first': 'b3lyp', 'second': 'atmgr', 'parent': 'eneyne', 'subject': 'mA', 'lbl': 'ATM'}),
#({'first': 'pbe', 'second': 'atm(gr)', 'parent': 'eneyne', 'subject': 'mB', 'lbl': 'ATM'}),
#({'first': '', 'second': 'ATMgr', 'parent': 'eneyne', 'subject': 'mAgB', 'lbl': 'ATM'}),
# below two xfail until dftd3 that's only 2-body is out of psi4 proper
pytest.param({'first': 'atmgr', 'second': 'atmgr', 'parent': 'eneyne', 'subject': 'gAmB', 'lbl': 'ATM'}, marks=[using("dftd3_321"), pytest.mark.xfail]),
pytest.param({'first': 'pbe-atmgr', 'second': None, 'parent': 'ne', 'subject': 'atom', 'lbl': 'ATM'}, marks=[using("dftd3_321"), pytest.mark.xfail]),
]) # yapf: disable
def test_molecule__run_dftd3__23body(inp, subjects):
subject = subjects()[inp['parent']][inp['subject']]
expected = ref[inp['parent']][inp['lbl']][inp['subject']]
gexpected = gref[inp['parent']][inp['lbl']][inp['subject']]
E, G = subject.run_dftd3(inp['first'], inp['second'])
assert compare_values(expected, E, atol=1.e-7)
assert compare_values(gexpected, G, atol=1.e-7)
@using("qcdb")
def test_qcdb__energy_d3():
eneyne = qcdb.set_molecule(seneyne)
eneyne.update_geometry()
E, jrec = qcdb.energy('d3-b3lyp-d2', return_wfn=True)
assert compare_values(ref['eneyne']['B3LYP-D2']['dimer'], E, 7, 'P: Ethene-Ethyne -D2')
assert compare_values(ref['eneyne']['B3LYP-D2']['dimer'], jrec['qcvars']['DISPERSION CORRECTION ENERGY'].data, 7,
tnm())
assert compare_values(ref['eneyne']['B3LYP-D2']['dimer'],
jrec['qcvars']['B3LYP-D2 DISPERSION CORRECTION ENERGY'].data, 7, tnm())
mA = eneyne.extract_subsets(1)
E, jrec = qcdb.energy('d3-b3lyp-d3bj', return_wfn=True, molecule=mA)
assert compare_values(ref['eneyne']['B3LYP-D3(BJ)']['mA'], E, 7, tnm())
assert compare_values(ref['eneyne']['B3LYP-D3(BJ)']['mA'], jrec['qcvars']['DISPERSION CORRECTION ENERGY'].data, 7,
tnm())
assert compare_values(ref['eneyne']['B3LYP-D3(BJ)']['mA'],
jrec['qcvars']['B3LYP-D3(BJ) DISPERSION CORRECTION ENERGY'].data, 7, tnm())
@using("mp2d")
@pytest.mark.parametrize(
"subjects",
[
pytest.param(eneyne_ne_psi4mols, marks=using("psi4")),
pytest.param(eneyne_ne_qcdbmols,
marks=using("psi4")), # needs qcdb.Molecule, presently more common in psi4 than in qcdb
pytest.param(eneyne_ne_qcschemamols),
],
ids=['qmol', 'pmol', 'qcmol'])
@pytest.mark.parametrize("inp", [
({'parent': 'eneyne', 'name': 'mp2d-mp2-dmp2', 'subject': 'dimer', 'lbl': 'MP2-DMP2'}),
({'parent': 'eneyne', 'name': 'mp2d-mp2-dmp2', 'subject': 'mA', 'lbl': 'MP2-DMP2'}),
({'parent': 'eneyne', 'name': 'mp2d-mp2-dmp2', 'subject': 'mB', 'lbl': 'MP2-DMP2'}),
({'parent': 'eneyne', 'name': 'mp2d-mp2-dmp2', 'subject': 'gAmB', 'lbl': 'MP2-DMP2'}),
({'parent': 'eneyne', 'name': 'mp2d-mp2-dmp2', 'subject': 'mAgB', 'lbl': 'MP2-DMP2'}),
({'parent': 'ne', 'name': 'mp2d-mp2-dmp2', 'subject': 'atom', 'lbl': 'MP2-DMP2'}),
]) # yapf: disable
def test_mp2d__run_mp2d__2body(inp, subjects, request):
subject = subjects()[inp['parent']][inp['subject']]
expected = ref[inp['parent']][inp['lbl']][inp['subject']]
gexpected = gref[inp['parent']][inp['lbl']][inp['subject']].ravel()
if 'qcmol' in request.node.name:
mol = subject
else:
mol = subject.to_schema(dtype=2)
resinp = {
'schema_name': 'qcschema_input',
'schema_version': 1,
'molecule': mol,
'driver': 'gradient',
'model': {
'method': inp['name']
},
'keywords': {},
}
jrec = qcng.compute(resinp, 'mp2d', raise_error=True)
jrec = jrec.dict()
#assert len(jrec['extras']['qcvars']) == 8
assert compare_values(expected, jrec['extras']['qcvars']['CURRENT ENERGY'], atol=1.e-7)
assert compare_values(expected, jrec['extras']['qcvars']['DISPERSION CORRECTION ENERGY'], atol=1.e-7)
assert compare_values(expected, jrec['extras']['qcvars'][inp['lbl'] + ' DISPERSION CORRECTION ENERGY'], atol=1.e-7)
assert compare_values(gexpected, jrec['extras']['qcvars']['CURRENT GRADIENT'], atol=1.e-7)
assert compare_values(gexpected, jrec['extras']['qcvars']['DISPERSION CORRECTION GRADIENT'], atol=1.e-7)
assert compare_values(
gexpected, jrec['extras']['qcvars'][inp['lbl'] + ' DISPERSION CORRECTION GRADIENT'], atol=1.e-7)
@using("dftd3")
@pytest.mark.parametrize(
"subjects",
[
pytest.param(eneyne_ne_psi4mols, marks=using("psi4")),
pytest.param(eneyne_ne_qcdbmols,
marks=using("psi4")), # needs qcdb.Molecule, presently more common in psi4 than in qcdb
pytest.param(eneyne_ne_qcschemamols),
],
ids=['qmol', 'pmol', 'qcmol'])
@pytest.mark.parametrize("inp", [
({'parent': 'eneyne', 'name': 'd3-b3lyp-d', 'subject': 'dimer', 'lbl': 'B3LYP-D2'}),
({'parent': 'eneyne', 'name': 'd3-b3lyp-d3bj', 'subject': 'mA', 'lbl': 'B3LYP-D3(BJ)'}),
({'parent': 'eneyne', 'name': 'd3-PBE-D3zero', 'subject': 'mB', 'lbl': 'PBE-D3'}),
({'parent': 'eneyne', 'name': 'd3-PBE-D3zero', 'subject': 'gAmB', 'lbl': 'PBE-D3'}),
({'parent': 'eneyne', 'name': 'd3-PBE-D2', 'subject': 'mAgB', 'lbl': 'PBE-D2'}),
({'parent': 'ne', 'name': 'd3-b3lyp-d3bj', 'subject': 'atom', 'lbl': 'B3LYP-D3(BJ)'}),
]) # yapf: disable
def test_dftd3__run_dftd3__2body(inp, subjects, request):
subject = subjects()[inp['parent']][inp['subject']]
expected = ref[inp['parent']][inp['lbl']][inp['subject']]
gexpected = gref[inp['parent']][inp['lbl']][inp['subject']].ravel()
if 'qcmol' in request.node.name:
mol = subject
else:
mol = subject.to_schema(dtype=2)
resinp = {
'schema_name': 'qcschema_input',
'schema_version': 1,
'molecule': mol,
'driver': 'gradient',
'model': {
'method': inp['name']
},
'keywords': {},
}
jrec = qcng.compute(resinp, 'dftd3', raise_error=True)
jrec = jrec.dict()
assert len(jrec['extras']['qcvars']) == 8
assert compare_values(expected, jrec['extras']['qcvars']['CURRENT ENERGY'], atol=1.e-7)
assert compare_values(expected, jrec['extras']['qcvars']['DISPERSION CORRECTION ENERGY'], atol=1.e-7)
assert compare_values(expected, jrec['extras']['qcvars']['2-BODY DISPERSION CORRECTION ENERGY'], atol=1.e-7)
assert compare_values(expected, jrec['extras']['qcvars'][inp['lbl'] + ' DISPERSION CORRECTION ENERGY'], atol=1.e-7)
assert compare_values(gexpected, jrec['extras']['qcvars']['CURRENT GRADIENT'], atol=1.e-7)
assert compare_values(gexpected, jrec['extras']['qcvars']['DISPERSION CORRECTION GRADIENT'], atol=1.e-7)
assert compare_values(gexpected, jrec['extras']['qcvars']['2-BODY DISPERSION CORRECTION GRADIENT'], atol=1.e-7)
assert compare_values(
gexpected, jrec['extras']['qcvars'][inp['lbl'] + ' DISPERSION CORRECTION GRADIENT'], atol=1.e-7)
@using("dftd3_321")
@pytest.mark.parametrize(
"subjects",
[
pytest.param(eneyne_ne_psi4mols, marks=using("psi4")),
pytest.param(eneyne_ne_qcdbmols,
marks=using("psi4")), # needs qcdb.Molecule, presently more common in psi4 than in qcdb
pytest.param(eneyne_ne_qcschemamols),
],
ids=['qmol', 'pmol', 'qcmol'])
@pytest.mark.parametrize("inp", [
({'parent': 'eneyne', 'name': 'd3-atmgr', 'subject': 'dimer', 'lbl': 'ATM'}),
({'parent': 'eneyne', 'name': 'd3-b3lyp-atmgr', 'subject': 'mA', 'lbl': 'ATM'}),
({'parent': 'eneyne', 'name': 'd3-pbe-atm(gr)', 'subject': 'mB', 'lbl': 'ATM'}),
({'parent': 'eneyne', 'name': 'd3-ATMgr', 'subject': 'mAgB', 'lbl': 'ATM'}),
({'parent': 'eneyne', 'name': 'd3-atmgr', 'subject': 'gAmB', 'lbl': 'ATM'}),
({'parent': 'ne', 'name': 'd3-atmgr', 'subject': 'atom', 'lbl': 'ATM'}),
]) # yapf: disable
def test_dftd3__run_dftd3__3body(inp, subjects, request):
subject = subjects()[inp['parent']][inp['subject']]
expected = ref[inp['parent']][inp['lbl']][inp['subject']]
gexpected = gref[inp['parent']][inp['lbl']][inp['subject']].ravel()
if 'qcmol' in request.node.name:
mol = subject
else:
mol = subject.to_schema(dtype=2)
resinp = {
'schema_name': 'qcschema_input',
'schema_version': 1,
'molecule': mol,
'driver': 'gradient',
'model': {
'method': inp['name']
},
'keywords': {},
}
jrec = qcng.compute(resinp, 'dftd3', raise_error=True)
jrec = jrec.dict()
assert len(jrec['extras']['qcvars']) == 8
assert compare_values(expected, jrec['extras']['qcvars']['CURRENT ENERGY'], atol=1.e-7)
assert compare_values(expected, jrec['extras']['qcvars']['DISPERSION CORRECTION ENERGY'], atol=1.e-7)
assert compare_values(expected, jrec['extras']['qcvars']['3-BODY DISPERSION CORRECTION ENERGY'], atol=1.e-7)
assert compare_values(
expected, jrec['extras']['qcvars']['AXILROD-TELLER-MUTO 3-BODY DISPERSION CORRECTION ENERGY'], atol=1.e-7)
assert compare_values(gexpected, jrec['extras']['qcvars']['CURRENT GRADIENT'], atol=1.e-7)
assert compare_values(gexpected, jrec['extras']['qcvars']['DISPERSION CORRECTION GRADIENT'], atol=1.e-7)
assert compare_values(gexpected, jrec['extras']['qcvars']['3-BODY DISPERSION CORRECTION GRADIENT'], atol=1.e-7)
assert compare_values(
gexpected, jrec['extras']['qcvars']['AXILROD-TELLER-MUTO 3-BODY DISPERSION CORRECTION GRADIENT'], atol=1.e-7)
| ashutoshvt/psi4 | tests/pytests/test_qcng_dftd3_mp2d.py | Python | lgpl-3.0 | 19,641 | ["Psi4"] | f59fe18228b75f75c23e3163d4d02e9ece7291033c033c018e7e901d3fea55f9 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various learning rate decay functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.exponential_decay"])
def exponential_decay(learning_rate,
global_step,
decay_steps,
decay_rate,
staircase=False,
name=None):
"""Applies exponential decay to the learning rate.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies an exponential decay function
to a provided initial learning rate. It requires a `global_step` value to
compute the decayed learning rate. You can just pass a TensorFlow variable
that you increment at each training step.
The function returns the decayed learning rate. It is computed as:
```python
decayed_learning_rate = learning_rate *
decay_rate ^ (global_step / decay_steps)
```
If the argument `staircase` is `True`, then `global_step / decay_steps` is an
integer division and the decayed learning rate follows a staircase function.
Example: decay every 100000 steps with a base of 0.96:
```python
...
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.1
learning_rate = tf.compat.v1.train.exponential_decay(starter_learning_rate,
global_step,
100000, 0.96, staircase=True)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
tf.compat.v1.train.GradientDescentOptimizer(learning_rate)
.minimize(...my loss..., global_step=global_step)
)
```
Args:
learning_rate: A scalar `float32` or `float64` `Tensor` or a Python number.
The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number. Global
step to use for the decay computation. Must not be negative.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number. Must
be positive. See the decay computation above.
decay_rate: A scalar `float32` or `float64` `Tensor` or a Python number.
The decay rate.
    staircase: Boolean. If `True`, decay the learning rate at discrete
      intervals.
name: String. Optional name of the operation. Defaults to
'ExponentialDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
decayed_lr = learning_rate_schedule.ExponentialDecay(
learning_rate, decay_steps, decay_rate, staircase=staircase, name=name)
if not context.executing_eagerly():
decayed_lr = decayed_lr(global_step)
else:
decayed_lr = functools.partial(decayed_lr, global_step)
return decayed_lr
@tf_export(v1=["train.piecewise_constant_decay", "train.piecewise_constant"])
def piecewise_constant(x, boundaries, values, name=None):
"""Piecewise constant from boundaries and interval values.
Example: use a learning rate that's 1.0 for the first 100001 steps, 0.5
for the next 10000 steps, and 0.1 for any additional steps.
```python
global_step = tf.Variable(0, trainable=False)
boundaries = [100000, 110000]
values = [1.0, 0.5, 0.1]
learning_rate = tf.compat.v1.train.piecewise_constant(global_step, boundaries,
values)
# Later, whenever we perform an optimization step, we increment global_step.
```
Args:
x: A 0-D scalar `Tensor`. Must be one of the following types: `float32`,
`float64`, `uint8`, `int8`, `int16`, `int32`, `int64`.
boundaries: A list of `Tensor`s or `int`s or `float`s with strictly
increasing entries, and with all elements having the same type as `x`.
values: A list of `Tensor`s or `float`s or `int`s that specifies the values
for the intervals defined by `boundaries`. It should have one more element
than `boundaries`, and all elements should have the same type.
name: A string. Optional name of the operation. Defaults to
'PiecewiseConstant'.
Returns:
A 0-D Tensor. Its value is `values[0]` when `x <= boundaries[0]`,
`values[1]` when `x > boundaries[0]` and `x <= boundaries[1]`, ...,
and values[-1] when `x > boundaries[-1]`.
Raises:
ValueError: if types of `x` and `boundaries` do not match, or types of all
`values` do not match or
the number of elements in the lists does not match.
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
boundaries = ops.convert_n_to_tensor(boundaries)
values = ops.convert_n_to_tensor(values)
x_recomp = ops.convert_to_tensor(x)
# Avoid explicit conversion to x's dtype. This could result in faulty
# comparisons, for example if floats are converted to integers.
for i, b in enumerate(boundaries):
if b.dtype.base_dtype != x_recomp.dtype.base_dtype:
# We can promote int32 boundaries to int64 without loss of precision.
# This covers the most common case where the user passes in boundaries
# as an array of Python integers.
if (b.dtype.base_dtype == dtypes.int32 and
x_recomp.dtype.base_dtype == dtypes.int64):
b = math_ops.cast(b, x_recomp.dtype.base_dtype)
boundaries[i] = b
else:
raise ValueError(
"Boundaries (%s) must have the same dtype as x (%s)." %
(b.dtype.base_dtype, x_recomp.dtype.base_dtype))
for v in values[1:]:
if v.dtype.base_dtype != values[0].dtype.base_dtype:
raise ValueError(
"Values must have elements all with the same dtype (%s vs %s)." %
(values[0].dtype.base_dtype, v.dtype.base_dtype))
decayed_lr = learning_rate_schedule.PiecewiseConstantDecay(
boundaries, values, name=name)
if not context.executing_eagerly():
decayed_lr = decayed_lr(x)
else:
decayed_lr = functools.partial(decayed_lr, x)
return decayed_lr
@tf_export(v1=["train.polynomial_decay"])
def polynomial_decay(learning_rate,
global_step,
decay_steps,
end_learning_rate=0.0001,
power=1.0,
cycle=False,
name=None):
"""Applies a polynomial decay to the learning rate.
It is commonly observed that a monotonically decreasing learning rate, whose
degree of change is carefully chosen, results in a better performing model.
This function applies a polynomial decay function to a provided initial
`learning_rate` to reach an `end_learning_rate` in the given `decay_steps`.
It requires a `global_step` value to compute the decayed learning rate. You
can just pass a TensorFlow variable that you increment at each training step.
The function returns the decayed learning rate. It is computed as:
```python
global_step = min(global_step, decay_steps)
decayed_learning_rate = (learning_rate - end_learning_rate) *
(1 - global_step / decay_steps) ^ (power) +
end_learning_rate
```
  If `cycle` is True then a multiple of `decay_steps` is used, the first one
  that is bigger than `global_step`.
```python
decay_steps = decay_steps * ceil(global_step / decay_steps)
decayed_learning_rate = (learning_rate - end_learning_rate) *
(1 - global_step / decay_steps) ^ (power) +
end_learning_rate
```
Example: decay from 0.1 to 0.01 in 10000 steps using sqrt (i.e. power=0.5):
```python
...
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.1
end_learning_rate = 0.01
decay_steps = 10000
learning_rate = tf.compat.v1.train.polynomial_decay(starter_learning_rate,
global_step,
decay_steps, end_learning_rate,
power=0.5)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
tf.compat.v1.train.GradientDescentOptimizer(learning_rate)
.minimize(...my loss..., global_step=global_step)
)
```
Args:
learning_rate: A scalar `float32` or `float64` `Tensor` or a Python number.
The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number. Global
step to use for the decay computation. Must not be negative.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number. Must
be positive. See the decay computation above.
end_learning_rate: A scalar `float32` or `float64` `Tensor` or a Python
number. The minimal end learning rate.
power: A scalar `float32` or `float64` `Tensor` or a Python number. The
power of the polynomial. Defaults to linear, 1.0.
cycle: A boolean, whether or not it should cycle beyond decay_steps.
name: String. Optional name of the operation. Defaults to
'PolynomialDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
decayed_lr = learning_rate_schedule.PolynomialDecay(
learning_rate,
decay_steps,
end_learning_rate=end_learning_rate,
power=power,
cycle=cycle,
name=name)
if not context.executing_eagerly():
decayed_lr = decayed_lr(global_step)
else:
decayed_lr = functools.partial(decayed_lr, global_step)
return decayed_lr
@tf_export(v1=["train.natural_exp_decay"])
def natural_exp_decay(learning_rate,
global_step,
decay_steps,
decay_rate,
staircase=False,
name=None):
"""Applies natural exponential decay to the initial learning rate.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies an exponential decay function
  to a provided initial learning rate. It requires a `global_step` value to
compute the decayed learning rate. You can just pass a TensorFlow variable
that you increment at each training step.
The function returns the decayed learning rate. It is computed as:
```python
decayed_learning_rate = learning_rate * exp(-decay_rate * global_step /
decay_step)
```
or, if `staircase` is `True`, as:
```python
decayed_learning_rate = learning_rate * exp(-decay_rate * floor(global_step /
decay_step))
```
Example: decay exponentially with a base of 0.96:
```python
...
global_step = tf.Variable(0, trainable=False)
learning_rate = 0.1
decay_steps = 5
k = 0.5
learning_rate = tf.compat.v1.train.natural_exp_decay(learning_rate,
global_step,
decay_steps, k)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
tf.compat.v1.train.GradientDescentOptimizer(learning_rate)
.minimize(...my loss..., global_step=global_step)
)
```
Args:
learning_rate: A scalar `float32` or `float64` `Tensor` or a Python number.
The initial learning rate.
global_step: A Python number. Global step to use for the decay computation.
Must not be negative.
decay_steps: How often to apply decay.
decay_rate: A Python number. The decay rate.
staircase: Whether to apply decay in a discrete staircase, as opposed to
continuous, fashion.
name: String. Optional name of the operation. Defaults to
'ExponentialTimeDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
natural_exp_rate = math_ops.exp(math_ops.negative(decay_rate))
decayed_lr = learning_rate_schedule.ExponentialDecay(
learning_rate,
decay_steps,
natural_exp_rate,
staircase=staircase,
name=name)
if not context.executing_eagerly():
decayed_lr = decayed_lr(global_step)
else:
decayed_lr = functools.partial(decayed_lr, global_step)
return decayed_lr
@tf_export(v1=["train.inverse_time_decay"])
def inverse_time_decay(learning_rate,
global_step,
decay_steps,
decay_rate,
staircase=False,
name=None):
"""Applies inverse time decay to the initial learning rate.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies an inverse decay function
  to a provided initial learning rate. It requires a `global_step` value to
compute the decayed learning rate. You can just pass a TensorFlow variable
that you increment at each training step.
The function returns the decayed learning rate. It is computed as:
```python
decayed_learning_rate = learning_rate / (1 + decay_rate * global_step /
                                             decay_steps)
```
or, if `staircase` is `True`, as:
```python
decayed_learning_rate = learning_rate / (1 + decay_rate * floor(global_step /
                                             decay_steps))
```
Example: decay 1/t with a rate of 0.5:
```python
...
global_step = tf.Variable(0, trainable=False)
learning_rate = 0.1
decay_steps = 1.0
decay_rate = 0.5
learning_rate = tf.compat.v1.train.inverse_time_decay(learning_rate,
global_step,
decay_steps, decay_rate)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
tf.compat.v1.train.GradientDescentOptimizer(learning_rate)
.minimize(...my loss..., global_step=global_step)
)
```
Args:
learning_rate: A scalar `float32` or `float64` `Tensor` or a Python number.
The initial learning rate.
global_step: A Python number. Global step to use for the decay computation.
Must not be negative.
decay_steps: How often to apply decay.
decay_rate: A Python number. The decay rate.
staircase: Whether to apply decay in a discrete staircase, as opposed to
continuous, fashion.
name: String. Optional name of the operation. Defaults to
'InverseTimeDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
decayed_lr = learning_rate_schedule.InverseTimeDecay(
learning_rate, decay_steps, decay_rate, staircase=staircase, name=name)
if not context.executing_eagerly():
decayed_lr = decayed_lr(global_step)
else:
decayed_lr = functools.partial(decayed_lr, global_step)
return decayed_lr
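# Hedged sketch (illustration only, not part of the library): a plain-Python
# restatement of the inverse time decay formula above; the defaults are
# arbitrary assumptions.
def _sketch_inverse_time_decay(step, lr=0.1, decay_rate=0.5, decay_steps=1.0,
                               staircase=False):
  progress = step // decay_steps if staircase else step / decay_steps
  return lr / (1 + decay_rate * progress)
# e.g. _sketch_inverse_time_decay(1) == 0.1 / (1 + 0.5 * 1) ~= 0.06667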
@tf_export(v1=["train.cosine_decay"])
def cosine_decay(learning_rate, global_step, decay_steps, alpha=0.0, name=None):
"""Applies cosine decay to the learning rate.
  See [Loshchilov & Hutter, ICLR2017], SGDR: Stochastic Gradient Descent
  with Warm Restarts. https://arxiv.org/abs/1608.03983
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies a cosine decay function
to a provided initial learning rate. It requires a `global_step` value to
compute the decayed learning rate. You can just pass a TensorFlow variable
that you increment at each training step.
The function returns the decayed learning rate. It is computed as:
```python
global_step = min(global_step, decay_steps)
cosine_decay = 0.5 * (1 + cos(pi * global_step / decay_steps))
decayed = (1 - alpha) * cosine_decay + alpha
decayed_learning_rate = learning_rate * decayed
```
Example usage:
```python
decay_steps = 1000
lr_decayed = cosine_decay(learning_rate, global_step, decay_steps)
```
Args:
learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number. Global
step to use for the decay computation.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number. Number
of steps to decay over.
alpha: A scalar `float32` or `float64` Tensor or a Python number. Minimum
learning rate value as a fraction of learning_rate.
name: String. Optional name of the operation. Defaults to 'CosineDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
decayed_lr = learning_rate_schedule.CosineDecay(
learning_rate, decay_steps, alpha=alpha, name=name)
if not context.executing_eagerly():
decayed_lr = decayed_lr(global_step)
else:
decayed_lr = functools.partial(decayed_lr, global_step)
return decayed_lr
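# Hedged sketch (illustration only, not part of the library): the cosine
# decay schedule above restated with the math module. At step 0 it returns
# lr; at decay_steps and beyond it returns alpha * lr.
def _sketch_cosine_decay(step, lr=0.1, decay_steps=1000, alpha=0.0):
  import math
  step = min(step, decay_steps)
  cosine = 0.5 * (1 + math.cos(math.pi * step / decay_steps))
  return lr * ((1 - alpha) * cosine + alpha)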
@tf_export(v1=["train.cosine_decay_restarts"])
def cosine_decay_restarts(learning_rate,
global_step,
first_decay_steps,
t_mul=2.0,
m_mul=1.0,
alpha=0.0,
name=None):
"""Applies cosine decay with restarts to the learning rate.
  See [Loshchilov & Hutter, ICLR2017], SGDR: Stochastic Gradient Descent
  with Warm Restarts. https://arxiv.org/abs/1608.03983
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies a cosine decay function with
restarts to a provided initial learning rate. It requires a `global_step`
value to compute the decayed learning rate. You can just pass a TensorFlow
variable that you increment at each training step.
The function returns the decayed learning rate while taking into account
possible warm restarts. The learning rate multiplier first decays
from 1 to `alpha` for `first_decay_steps` steps. Then, a warm
restart is performed. Each new warm restart runs for `t_mul` times more steps
and with `m_mul` times smaller initial learning rate.
Example usage:
```python
first_decay_steps = 1000
lr_decayed = cosine_decay_restarts(learning_rate, global_step,
first_decay_steps)
```
Args:
learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number. Global
step to use for the decay computation.
first_decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over.
    t_mul: A scalar `float32` or `float64` `Tensor` or a Python number. Used to
      derive the number of iterations in the i-th period.
    m_mul: A scalar `float32` or `float64` `Tensor` or a Python number. Used to
      derive the initial learning rate of the i-th period.
alpha: A scalar `float32` or `float64` Tensor or a Python number. Minimum
learning rate value as a fraction of the learning_rate.
name: String. Optional name of the operation. Defaults to 'SGDRDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
decayed_lr = learning_rate_schedule.CosineDecayRestarts(
learning_rate,
first_decay_steps,
t_mul=t_mul,
m_mul=m_mul,
alpha=alpha,
name=name)
if not context.executing_eagerly():
decayed_lr = decayed_lr(global_step)
else:
decayed_lr = functools.partial(decayed_lr, global_step)
return decayed_lr
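# Hedged sketch (illustration only, not part of the library): a loop-based
# restatement of the warm-restart schedule above. Period i lasts
# first_decay_steps * t_mul**i steps and starts from multiplier m_mul**i;
# the library computes the period index in closed form instead of looping.
def _sketch_cosine_decay_restarts(step, lr=0.1, first_decay_steps=1000,
                                  t_mul=2.0, m_mul=1.0, alpha=0.0):
  import math
  period, mult = float(first_decay_steps), 1.0
  while step >= period:
    step -= period
    period *= t_mul
    mult *= m_mul
  cosine = 0.5 * (1 + math.cos(math.pi * step / period))
  return lr * mult * ((1 - alpha) * cosine + alpha)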
@tf_export(v1=["train.linear_cosine_decay"])
def linear_cosine_decay(learning_rate,
global_step,
decay_steps,
num_periods=0.5,
alpha=0.0,
beta=0.001,
name=None):
"""Applies linear cosine decay to the learning rate.
See [Bello et al., ICML2017] Neural Optimizer Search with RL.
https://arxiv.org/abs/1709.07417
For the idea of warm starts here controlled by `num_periods`,
  see [Loshchilov & Hutter, ICLR2017] SGDR: Stochastic Gradient Descent
  with Warm Restarts. https://arxiv.org/abs/1608.03983
Note that linear cosine decay is more aggressive than cosine decay and
larger initial learning rates can typically be used.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies a linear cosine decay function
to a provided initial learning rate. It requires a `global_step` value to
compute the decayed learning rate. You can just pass a TensorFlow variable
that you increment at each training step.
The function returns the decayed learning rate. It is computed as:
```python
global_step = min(global_step, decay_steps)
  linear_decay = (decay_steps - global_step) / decay_steps
cosine_decay = 0.5 * (
1 + cos(pi * 2 * num_periods * global_step / decay_steps))
decayed = (alpha + linear_decay) * cosine_decay + beta
decayed_learning_rate = learning_rate * decayed
```
Example usage:
```python
decay_steps = 1000
lr_decayed = linear_cosine_decay(learning_rate, global_step, decay_steps)
```
Args:
learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number. Global
step to use for the decay computation.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number. Number
of steps to decay over.
num_periods: Number of periods in the cosine part of the decay. See
computation above.
alpha: See computation above.
beta: See computation above.
name: String. Optional name of the operation. Defaults to
'LinearCosineDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
decayed_lr = learning_rate_schedule.LinearCosineDecay(
learning_rate,
decay_steps,
num_periods=num_periods,
alpha=alpha,
beta=beta,
name=name)
if not context.executing_eagerly():
decayed_lr = decayed_lr(global_step)
else:
decayed_lr = functools.partial(decayed_lr, global_step)
return decayed_lr
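# Hedged sketch (illustration only, not part of the library): the linear
# cosine decay above in plain Python. With num_periods=0.5 the cosine term
# completes half a full oscillation over decay_steps.
def _sketch_linear_cosine_decay(step, lr=0.1, decay_steps=1000,
                                num_periods=0.5, alpha=0.0, beta=0.001):
  import math
  step = min(step, decay_steps)
  linear = (decay_steps - step) / decay_steps
  cosine = 0.5 * (1 + math.cos(math.pi * 2 * num_periods * step / decay_steps))
  return lr * ((alpha + linear) * cosine + beta)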
@tf_export(v1=["train.noisy_linear_cosine_decay"])
def noisy_linear_cosine_decay(learning_rate,
global_step,
decay_steps,
initial_variance=1.0,
variance_decay=0.55,
num_periods=0.5,
alpha=0.0,
beta=0.001,
name=None):
"""Applies noisy linear cosine decay to the learning rate.
See [Bello et al., ICML2017] Neural Optimizer Search with RL.
https://arxiv.org/abs/1709.07417
For the idea of warm starts here controlled by `num_periods`,
  see [Loshchilov & Hutter, ICLR2017] SGDR: Stochastic Gradient Descent
  with Warm Restarts. https://arxiv.org/abs/1608.03983
Note that linear cosine decay is more aggressive than cosine decay and
larger initial learning rates can typically be used.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies a noisy linear
cosine decay function to a provided initial learning rate.
It requires a `global_step` value to compute the decayed learning rate.
You can just pass a TensorFlow variable that you increment at each
training step.
The function returns the decayed learning rate. It is computed as:
```python
global_step = min(global_step, decay_steps)
  linear_decay = (decay_steps - global_step) / decay_steps
cosine_decay = 0.5 * (
1 + cos(pi * 2 * num_periods * global_step / decay_steps))
decayed = (alpha + linear_decay + eps_t) * cosine_decay + beta
decayed_learning_rate = learning_rate * decayed
```
  where eps_t is 0-centered Gaussian noise with variance
initial_variance / (1 + global_step) ** variance_decay
Example usage:
```python
decay_steps = 1000
lr_decayed = noisy_linear_cosine_decay(
learning_rate, global_step, decay_steps)
```
Args:
learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number. Global
step to use for the decay computation.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number. Number
of steps to decay over.
initial_variance: initial variance for the noise. See computation above.
variance_decay: decay for the noise's variance. See computation above.
num_periods: Number of periods in the cosine part of the decay. See
computation above.
alpha: See computation above.
beta: See computation above.
name: String. Optional name of the operation. Defaults to
'NoisyLinearCosineDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
decayed_lr = learning_rate_schedule.NoisyLinearCosineDecay(
learning_rate,
decay_steps,
initial_variance=initial_variance,
variance_decay=variance_decay,
num_periods=num_periods,
alpha=alpha,
beta=beta,
name=name)
if not context.executing_eagerly():
decayed_lr = decayed_lr(global_step)
else:
decayed_lr = functools.partial(decayed_lr, global_step)
return decayed_lr
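# Hedged sketch (illustration only, not part of the library): the noisy
# variant above, drawing eps_t with random.gauss. The library samples the
# noise inside the graph; this is only a numeric illustration.
def _sketch_noisy_linear_cosine_decay(step, lr=0.1, decay_steps=1000,
                                      initial_variance=1.0,
                                      variance_decay=0.55, num_periods=0.5,
                                      alpha=0.0, beta=0.001):
  import math
  import random
  step = min(step, decay_steps)
  std = math.sqrt(initial_variance / (1 + step) ** variance_decay)
  eps_t = random.gauss(0.0, std)
  linear = (decay_steps - step) / decay_steps
  cosine = 0.5 * (1 + math.cos(math.pi * 2 * num_periods * step / decay_steps))
  return lr * ((alpha + linear + eps_t) * cosine + beta)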
|
ghchinoy/tensorflow
|
tensorflow/python/training/learning_rate_decay.py
|
Python
|
apache-2.0
| 29,150
|
[
"Gaussian"
] |
13ebdf750202396485c88b1782a5ba2b3bfeb0523eafee4273c6c568f5373bdb
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The minc module provides classes for interfacing with the `MINC
<http://www.bic.mni.mcgill.ca/ServicesSoftware/MINC>`_ command line tools. This
module was written to work with MINC version 2.2.00.
Author: Carlo Hamalainen <carlo@carlo-hamalainen.net>
http://carlo-hamalainen.net
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
from ..base import (
TraitedSpec,
CommandLineInputSpec,
CommandLine,
StdOutCommandLineInputSpec,
StdOutCommandLine,
File,
Directory,
InputMultiPath,
OutputMultiPath,
traits,
isdefined,
)
import glob
import os
import os.path
import re
from ..minc.base import check_minc, no_minc, Info, aggregate_filename
import warnings
warn = warnings.warn
warnings.filterwarnings('always', category=UserWarning)
class ExtractInputSpec(StdOutCommandLineInputSpec):
input_file = File(
desc='input file',
exists=True,
mandatory=True,
argstr='%s',
position=-2,)
output_file = File(
desc='output file',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s.raw',
keep_extension=False)
    _xor_write = ('write_ascii', 'write_byte',
                  'write_short', 'write_int', 'write_long',
                  'write_float', 'write_double', 'write_signed',
                  'write_unsigned',)
write_ascii = traits.Bool(
desc='Write out data as ascii strings (default).',
argstr='-ascii',
xor=_xor_write)
write_byte = traits.Bool(
desc='Write out data as bytes.',
argstr='-byte',
xor=_xor_write)
write_short = traits.Bool(
desc='Write out data as short integers.',
argstr='-short',
xor=_xor_write)
write_int = traits.Bool(
desc='Write out data as 32-bit integers.',
argstr='-int',
xor=_xor_write)
write_long = traits.Bool(
desc='Superseded by write_int.',
argstr='-long',
xor=_xor_write)
write_float = traits.Bool(
desc='Write out data as single precision floating-point values.',
argstr='-float',
xor=_xor_write)
write_double = traits.Bool(
desc='Write out data as double precision floating-point values.',
argstr='-double',
xor=_xor_write)
_xor_signed = ('write_signed', 'write_unsigned')
write_signed = traits.Bool(
desc='Write out signed data.',
argstr='-signed',
xor=_xor_signed)
write_unsigned = traits.Bool(
desc='Write out unsigned data.',
argstr='-unsigned',
xor=_xor_signed)
write_range = traits.Tuple(
traits.Float, traits.Float, argstr='-range %s %s',
desc='Specify the range of output values\nDefault value: 1.79769e+308 1.79769e+308.',)
_xor_normalize = ('normalize', 'nonormalize',)
normalize = traits.Bool(
desc='Normalize integer pixel values to file max and min.',
argstr='-normalize',
xor=_xor_normalize)
nonormalize = traits.Bool(
desc='Turn off pixel normalization.',
argstr='-nonormalize',
xor=_xor_normalize)
image_range = traits.Tuple(
traits.Float, traits.Float,
desc='Specify the range of real image values for normalization.',
argstr='-image_range %s %s')
    image_minimum = traits.Float(
        desc=('Specify the minimum real image value for normalization. '
              'Default value: 1.79769e+308.'),
        argstr='-image_minimum %s')
    image_maximum = traits.Float(
        desc=('Specify the maximum real image value for normalization. '
              'Default value: 1.79769e+308.'),
        argstr='-image_maximum %s')
start = InputMultiPath(
traits.Int,
desc='Specifies corner of hyperslab (C conventions for indices).',
sep=',',
argstr='-start %s',)
count = InputMultiPath(
traits.Int,
desc='Specifies edge lengths of hyperslab to read.',
sep=',',
argstr='-count %s',)
# FIXME Can we make sure that len(start) == len(count)?
_xor_flip = (
'flip_positive_direction',
'flip_negative_direction',
'flip_any_direction')
flip_positive_direction = traits.Bool(
desc='Flip images to always have positive direction.',
argstr='-positive_direction',
xor=_xor_flip)
flip_negative_direction = traits.Bool(
desc='Flip images to always have negative direction.',
argstr='-negative_direction',
xor=_xor_flip)
flip_any_direction = traits.Bool(
desc='Do not flip images (Default).',
argstr='-any_direction',
xor=_xor_flip)
_xor_x_flip = ('flip_x_positive', 'flip_x_negative', 'flip_x_any')
flip_x_positive = traits.Bool(
desc='Flip images to give positive xspace:step value (left-to-right).',
argstr='+xdirection',
xor=_xor_x_flip)
flip_x_negative = traits.Bool(
desc='Flip images to give negative xspace:step value (right-to-left).',
argstr='-xdirection',
xor=_xor_x_flip)
flip_x_any = traits.Bool(
desc='Don\'t flip images along x-axis (default).',
argstr='-xanydirection',
xor=_xor_x_flip)
_xor_y_flip = ('flip_y_positive', 'flip_y_negative', 'flip_y_any')
flip_y_positive = traits.Bool(
desc='Flip images to give positive yspace:step value (post-to-ant).',
argstr='+ydirection',
xor=_xor_y_flip)
flip_y_negative = traits.Bool(
desc='Flip images to give negative yspace:step value (ant-to-post).',
argstr='-ydirection',
xor=_xor_y_flip)
flip_y_any = traits.Bool(
desc='Don\'t flip images along y-axis (default).',
argstr='-yanydirection',
xor=_xor_y_flip)
_xor_z_flip = ('flip_z_positive', 'flip_z_negative', 'flip_z_any')
flip_z_positive = traits.Bool(
desc='Flip images to give positive zspace:step value (inf-to-sup).',
argstr='+zdirection',
xor=_xor_z_flip)
flip_z_negative = traits.Bool(
desc='Flip images to give negative zspace:step value (sup-to-inf).',
argstr='-zdirection',
xor=_xor_z_flip)
flip_z_any = traits.Bool(
desc='Don\'t flip images along z-axis (default).',
argstr='-zanydirection',
xor=_xor_z_flip)
class ExtractOutputSpec(TraitedSpec):
output_file = File(desc='output file in raw/text format', exists=True)
class Extract(StdOutCommandLine):
"""Dump a hyperslab of MINC file data.
Examples
--------
>>> from nipype.interfaces.minc import Extract
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> extract = Extract(input_file=minc2Dfile)
>>> extract.run() # doctest: +SKIP
>>> extract = Extract(input_file=minc2Dfile, start=[3, 10, 5], count=[4, 4, 4]) # extract a 4x4x4 slab at offset [3, 10, 5]
>>> extract.run() # doctest: +SKIP
"""
input_spec = ExtractInputSpec
output_spec = ExtractOutputSpec
_cmd = 'mincextract'
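# Hedged sketch (illustration only, not part of the module): how the
# comma-separated list traits above (sep=',') are rendered into arguments,
# e.g. start=[3, 10, 5] becomes '-start 3,10,5' on the mincextract command
# line.
def _sketch_join_list_arg(flag, values, sep=','):
    return '%s %s' % (flag, sep.join(str(v) for v in values))
# _sketch_join_list_arg('-start', [3, 10, 5]) -> '-start 3,10,5'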
class ToRawInputSpec(StdOutCommandLineInputSpec):
input_file = File(
desc='input file',
exists=True,
mandatory=True,
argstr='%s',
position=-2,)
output_file = File(
desc='output file',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s.raw',
keep_extension=False)
_xor_write = ('write_byte', 'write_short', 'write_int',
'write_long', 'write_float', 'write_double')
write_byte = traits.Bool(
desc='Write out data as bytes.',
argstr='-byte',
xor=_xor_write)
write_short = traits.Bool(
desc='Write out data as short integers.',
argstr='-short',
xor=_xor_write)
write_int = traits.Bool(
desc='Write out data as 32-bit integers.',
argstr='-int',
xor=_xor_write)
write_long = traits.Bool(
desc='Superseded by write_int.',
argstr='-long',
xor=_xor_write)
write_float = traits.Bool(
desc='Write out data as single precision floating-point values.',
argstr='-float',
xor=_xor_write)
write_double = traits.Bool(
desc='Write out data as double precision floating-point values.',
argstr='-double',
xor=_xor_write)
_xor_signed = ('write_signed', 'write_unsigned')
write_signed = traits.Bool(
desc='Write out signed data.',
argstr='-signed',
xor=_xor_signed)
write_unsigned = traits.Bool(
desc='Write out unsigned data.',
argstr='-unsigned',
xor=_xor_signed)
    write_range = traits.Tuple(
        traits.Float, traits.Float, argstr='-range %s %s',
        desc=('Specify the range of output values. '
              'Default value: 1.79769e+308 1.79769e+308.'),)
_xor_normalize = ('normalize', 'nonormalize',)
normalize = traits.Bool(
desc='Normalize integer pixel values to file max and min.',
argstr='-normalize',
xor=_xor_normalize)
nonormalize = traits.Bool(
desc='Turn off pixel normalization.',
argstr='-nonormalize',
xor=_xor_normalize)
class ToRawOutputSpec(TraitedSpec):
output_file = File(desc='output file in raw format', exists=True)
class ToRaw(StdOutCommandLine):
"""Dump a chunk of MINC file data. This program is largely
superceded by mincextract (see Extract).
Examples
--------
>>> from nipype.interfaces.minc import ToRaw
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> toraw = ToRaw(input_file=minc2Dfile)
>>> toraw.run() # doctest: +SKIP
>>> toraw = ToRaw(input_file=minc2Dfile, write_range=(0, 100))
>>> toraw.run() # doctest: +SKIP
"""
input_spec = ToRawInputSpec
output_spec = ToRawOutputSpec
_cmd = 'minctoraw'
class ConvertInputSpec(CommandLineInputSpec):
input_file = File(
desc='input file for converting',
exists=True,
mandatory=True,
argstr='%s',
position=-2,)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s_convert_output.mnc')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
two = traits.Bool(desc='Create a MINC 2 output file.', argstr='-2')
    template = traits.Bool(
        desc=('Create a template file. The dimensions, variables, and '
              'attributes of the input file are preserved but all data is set to zero.'),
        argstr='-template',
    )
compression = traits.Enum(
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
argstr='-compress %s',
desc='Set the compression level, from 0 (disabled) to 9 (maximum).',
)
chunk = traits.Range(
low=0,
desc='Set the target block size for chunking (0 default, >1 block size).',
value=0,
usedefault=False,
argstr='-chunk %d',
)
class ConvertOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
class Convert(CommandLine):
"""convert between MINC 1 to MINC 2 format.
Examples
--------
>>> from nipype.interfaces.minc import Convert
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> c = Convert(input_file=minc2Dfile, output_file='/tmp/out.mnc', two=True) # Convert to MINC2 format.
>>> c.run() # doctest: +SKIP
"""
input_spec = ConvertInputSpec
output_spec = ConvertOutputSpec
_cmd = 'mincconvert'
class CopyInputSpec(CommandLineInputSpec):
input_file = File(
desc='input file to copy',
exists=True,
mandatory=True,
argstr='%s',
position=-2,)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s_copy.mnc')
_xor_pixel = ('pixel_values', 'real_values')
pixel_values = traits.Bool(
desc='Copy pixel values as is.',
argstr='-pixel_values',
xor=_xor_pixel)
real_values = traits.Bool(
desc='Copy real pixel intensities (default).',
argstr='-real_values',
xor=_xor_pixel)
class CopyOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
class Copy(CommandLine):
"""
Copy image values from one MINC file to another. Both the input
and output files must exist, and the images in both files must
    have an equal number of dimensions and equal dimension lengths.
NOTE: This program is intended primarily for use with scripts
such as mincedit. It does not follow the typical design rules of
most MINC command-line tools and therefore should be used only
with caution.
"""
input_spec = CopyInputSpec
output_spec = CopyOutputSpec
_cmd = 'minccopy'
class ToEcatInputSpec(CommandLineInputSpec):
input_file = File(
desc='input file to convert',
exists=True,
mandatory=True,
argstr='%s',
position=-2,)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s_to_ecat.v',
keep_extension=False)
    ignore_patient_variable = traits.Bool(
        desc='Ignore information from the minc patient variable.',
        argstr='-ignore_patient_variable',)
    ignore_study_variable = traits.Bool(
        desc='Ignore information from the minc study variable.',
        argstr='-ignore_study_variable',)
    ignore_acquisition_variable = traits.Bool(
        desc='Ignore information from the minc acquisition variable.',
        argstr='-ignore_acquisition_variable',)
    ignore_ecat_acquisition_variable = traits.Bool(
        desc='Ignore information from the minc ecat_acquisition variable.',
        argstr='-ignore_ecat_acquisition_variable',)
    ignore_ecat_main = traits.Bool(
        desc='Ignore information from the minc ecat-main variable.',
        argstr='-ignore_ecat_main',)
    ignore_ecat_subheader_variable = traits.Bool(
        desc='Ignore information from the minc ecat-subhdr variable.',
        argstr='-ignore_ecat_subheader_variable',)
no_decay_corr_fctr = traits.Bool(
desc='Do not compute the decay correction factors',
argstr='-no_decay_corr_fctr',)
    voxels_as_integers = traits.Bool(
        desc=('Voxel values are treated as integers, scale and '
              'calibration factors are set to unity'),
        argstr='-label',)
class ToEcatOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
class ToEcat(CommandLine):
"""Convert a 2D image, a 3D volumes or a 4D dynamic volumes
written in MINC file format to a 2D, 3D or 4D Ecat7 file.
Examples
--------
>>> from nipype.interfaces.minc import ToEcat
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> c = ToEcat(input_file=minc2Dfile)
>>> c.run() # doctest: +SKIP
>>> c = ToEcat(input_file=minc2Dfile, voxels_as_integers=True)
>>> c.run() # doctest: +SKIP
"""
input_spec = ToEcatInputSpec
output_spec = ToEcatOutputSpec
_cmd = 'minctoecat'
class DumpInputSpec(StdOutCommandLineInputSpec):
input_file = File(
desc='input file',
exists=True,
mandatory=True,
argstr='%s',
position=-2,)
output_file = File(
desc='output file',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s_dump.txt',
keep_extension=False)
_xor_coords_or_header = ('coordinate_data', 'header_data',)
coordinate_data = traits.Bool(
desc='Coordinate variable data and header information.',
argstr='-c',
xor=_xor_coords_or_header)
header_data = traits.Bool(
desc='Header information only, no data.',
argstr='-h',
xor=_xor_coords_or_header)
_xor_annotations = ('annotations_brief', 'annotations_full',)
annotations_brief = traits.Enum(
'c',
'f',
argstr='-b %s',
desc='Brief annotations for C or Fortran indices in data.',
xor=_xor_annotations)
annotations_full = traits.Enum(
'c',
'f',
argstr='-f %s',
desc='Full annotations for C or Fortran indices in data.',
xor=_xor_annotations)
variables = InputMultiPath(
traits.Str,
desc='Output data for specified variables only.',
sep=',',
argstr='-v %s')
line_length = traits.Range(
low=0,
desc='Line length maximum in data section (default 80).',
value=80,
usedefault=False,
argstr='-l %d')
netcdf_name = traits.Str(
desc='Name for netCDF (default derived from file name).',
argstr='-n %s')
precision = traits.Either(
traits.Int(),
traits.Tuple(traits.Int, traits.Int),
desc='Display floating-point values with less precision',
argstr='%s',) # See _format_arg in Dump for actual formatting.
class DumpOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
class Dump(StdOutCommandLine):
"""Dump a MINC file. Typically used in conjunction with mincgen (see Gen).
Examples
--------
>>> from nipype.interfaces.minc import Dump
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> dump = Dump(input_file=minc2Dfile)
>>> dump.run() # doctest: +SKIP
>>> dump = Dump(input_file=minc2Dfile, output_file='/tmp/out.txt', precision=(3, 4))
>>> dump.run() # doctest: +SKIP
"""
input_spec = DumpInputSpec
output_spec = DumpOutputSpec
_cmd = 'mincdump'
def _format_arg(self, name, spec, value):
if name == 'precision':
if isinstance(value, int):
return '-p %d' % value
elif isinstance(value, tuple) and isinstance(value[0], int) and isinstance(value[1], int):
return '-p %d,%d' % (value[0], value[1],)
else:
raise ValueError('Invalid precision argument: ' + str(value))
return super(Dump, self)._format_arg(name, spec, value)
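# Hedged sketch (illustration only, not part of the module): a pure-Python
# restatement of the 'precision' mapping implemented in Dump._format_arg
# above.
def _sketch_dump_precision(value):
    if isinstance(value, int):
        return '-p %d' % value           # precision=3      -> '-p 3'
    return '-p %d,%d' % value            # precision=(3, 4) -> '-p 3,4'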
class AverageInputSpec(CommandLineInputSpec):
_xor_input_files = ('input_files', 'filelist')
input_files = InputMultiPath(
traits.File(exists=True),
desc='input file(s)',
mandatory=True,
sep=' ',
argstr='%s',
position=-2,
xor=_xor_input_files)
filelist = traits.File(
desc='Specify the name of a file containing input file names.',
argstr='-filelist %s',
exists=True,
mandatory=True,
xor=_xor_input_files)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_files'],
hash_files=False,
name_template='%s_averaged.mnc')
two = traits.Bool(desc='Create a MINC 2 output file.', argstr='-2')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
_xor_verbose = ('verbose', 'quiet',)
verbose = traits.Bool(
desc='Print out log messages (default).',
argstr='-verbose',
xor=_xor_verbose)
quiet = traits.Bool(
desc='Do not print out log messages.',
argstr='-quiet',
xor=_xor_verbose)
debug = traits.Bool(desc='Print out debugging messages.', argstr='-debug')
_xor_check_dimensions = ('check_dimensions', 'no_check_dimensions',)
check_dimensions = traits.Bool(
desc='Check that dimension info matches across files (default).',
argstr='-check_dimensions',
xor=_xor_check_dimensions)
no_check_dimensions = traits.Bool(
desc='Do not check dimension info.',
argstr='-nocheck_dimensions',
xor=_xor_check_dimensions)
_xor_format = (
'format_filetype',
'format_byte',
'format_short',
'format_int',
'format_long',
'format_float',
'format_double',
'format_signed',
'format_unsigned',
)
format_filetype = traits.Bool(
desc='Use data type of first file (default).',
argstr='-filetype',
xor=_xor_format)
format_byte = traits.Bool(
desc='Write out byte data.',
argstr='-byte',
xor=_xor_format)
format_short = traits.Bool(
desc='Write out short integer data.',
argstr='-short',
xor=_xor_format)
format_int = traits.Bool(
desc='Write out 32-bit integer data.',
argstr='-int',
xor=_xor_format)
format_long = traits.Bool(
desc='Superseded by -int.',
argstr='-long',
xor=_xor_format)
format_float = traits.Bool(
desc='Write out single-precision floating-point data.',
argstr='-float',
xor=_xor_format)
format_double = traits.Bool(
desc='Write out double-precision floating-point data.',
argstr='-double',
xor=_xor_format)
format_signed = traits.Bool(
desc='Write signed integer data.',
argstr='-signed',
xor=_xor_format)
format_unsigned = traits.Bool(
desc='Write unsigned integer data (default).',
argstr='-unsigned',
xor=_xor_format)
max_buffer_size_in_kb = traits.Range(
low=0,
desc='Specify the maximum size of the internal buffers (in kbytes).',
value=4096,
argstr='-max_buffer_size_in_kb %d',)
_xor_normalize = ('normalize', 'nonormalize',)
normalize = traits.Bool(
desc='Normalize data sets for mean intensity.',
argstr='-normalize',
xor=_xor_normalize)
nonormalize = traits.Bool(
desc='Do not normalize data sets (default).',
argstr='-nonormalize',
xor=_xor_normalize)
voxel_range = traits.Tuple(
traits.Int, traits.Int, argstr='-range %d %d',
desc='Valid range for output data.')
sdfile = traits.File(
desc='Specify an output sd file (default=none).',
argstr='-sdfile %s')
_xor_copy_header = ('copy_header', 'no_copy_header')
copy_header = traits.Bool(
desc='Copy all of the header from the first file (default for one file).',
argstr='-copy_header',
xor=_xor_copy_header)
no_copy_header = traits.Bool(
        desc='Do not copy all of the header from the first file (default for many files).',
argstr='-nocopy_header',
xor=_xor_copy_header)
avgdim = traits.Str(
desc='Specify a dimension along which we wish to average.',
argstr='-avgdim %s')
binarize = traits.Bool(
desc='Binarize the volume by looking for values in a given range.',
argstr='-binarize')
binrange = traits.Tuple(
traits.Float, traits.Float, argstr='-binrange %s %s',
desc='Specify a range for binarization. Default value: 1.79769e+308 -1.79769e+308.')
    binvalue = traits.Float(
        desc=('Specify a target value (+/- 0.5) for '
              'binarization. Default value: -1.79769e+308'),
        argstr='-binvalue %s')
weights = InputMultiPath(
traits.Str,
desc='Specify weights for averaging ("<w1>,<w2>,...").',
sep=',',
argstr='-weights %s',)
width_weighted = traits.Bool(
desc='Weight by dimension widths when -avgdim is used.',
argstr='-width_weighted',
requires=(
'avgdim',
))
class AverageOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
class Average(CommandLine):
"""Average a number of MINC files.
Examples
--------
>>> from nipype.interfaces.minc import Average
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> files = [nonempty_minc_data(i) for i in range(3)]
>>> average = Average(input_files=files, output_file='/tmp/tmp.mnc')
>>> average.run() # doctest: +SKIP
"""
input_spec = AverageInputSpec
output_spec = AverageOutputSpec
_cmd = 'mincaverage'
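# Hedged sketch (illustration only, not part of the module): the 'weights'
# trait above takes one string per input file and joins them with commas, so
# a hypothetical equal-weight average of three files renders roughly as
# '-weights 1,1,1' on the mincaverage command line.
def _sketch_equal_weights(n_inputs):
    return ','.join(['1'] * n_inputs)
# _sketch_equal_weights(3) -> '1,1,1'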
class BlobInputSpec(CommandLineInputSpec):
input_file = File(
desc='input file to blob',
exists=True,
mandatory=True,
argstr='%s',
position=-2,)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s_blob.mnc')
trace = traits.Bool(
desc='compute the trace (approximate growth and shrinkage) -- FAST',
argstr='-trace')
determinant = traits.Bool(
desc='compute the determinant (exact growth and shrinkage) -- SLOW',
argstr='-determinant')
translation = traits.Bool(
desc='compute translation (structure displacement)',
argstr='-translation')
magnitude = traits.Bool(
desc='compute the magnitude of the displacement vector',
argstr='-magnitude')
class BlobOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
class Blob(CommandLine):
"""Calculate blobs from minc deformation grids.
Examples
--------
>>> from nipype.interfaces.minc import Blob
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> blob = Blob(input_file=minc2Dfile, output_file='/tmp/tmp.mnc', trace=True)
>>> blob.run() # doctest: +SKIP
"""
input_spec = BlobInputSpec
output_spec = BlobOutputSpec
_cmd = 'mincblob'
class CalcInputSpec(CommandLineInputSpec):
_xor_input_files = ('input_files', 'filelist')
    input_files = InputMultiPath(
        traits.File(exists=True),
        desc='input file(s) for calculation',
        mandatory=True,
        sep=' ',
        argstr='%s',
        position=-2,
        xor=_xor_input_files)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_files'],
hash_files=False,
name_template='%s_calc.mnc')
two = traits.Bool(desc='Create a MINC 2 output file.', argstr='-2')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
_xor_verbose = ('verbose', 'quiet',)
verbose = traits.Bool(
desc='Print out log messages (default).',
argstr='-verbose',
xor=_xor_verbose)
quiet = traits.Bool(
desc='Do not print out log messages.',
argstr='-quiet',
xor=_xor_verbose)
debug = traits.Bool(desc='Print out debugging messages.', argstr='-debug')
filelist = traits.File(
desc='Specify the name of a file containing input file names.',
argstr='-filelist %s',
mandatory=True,
xor=_xor_input_files)
_xor_copy_header = ('copy_header', 'no_copy_header')
copy_header = traits.Bool(
desc='Copy all of the header from the first file.',
argstr='-copy_header',
xor=_xor_copy_header)
no_copy_header = traits.Bool(
desc='Do not copy all of the header from the first file.',
argstr='-nocopy_header',
xor=_xor_copy_header)
_xor_format = (
'format_filetype',
'format_byte',
'format_short',
'format_int',
'format_long',
'format_float',
'format_double',
'format_signed',
'format_unsigned',
)
format_filetype = traits.Bool(
desc='Use data type of first file (default).',
argstr='-filetype',
xor=_xor_format)
format_byte = traits.Bool(
desc='Write out byte data.',
argstr='-byte',
xor=_xor_format)
format_short = traits.Bool(
desc='Write out short integer data.',
argstr='-short',
xor=_xor_format)
format_int = traits.Bool(
desc='Write out 32-bit integer data.',
argstr='-int',
xor=_xor_format)
format_long = traits.Bool(
desc='Superseded by -int.',
argstr='-long',
xor=_xor_format)
format_float = traits.Bool(
desc='Write out single-precision floating-point data.',
argstr='-float',
xor=_xor_format)
format_double = traits.Bool(
desc='Write out double-precision floating-point data.',
argstr='-double',
xor=_xor_format)
format_signed = traits.Bool(
desc='Write signed integer data.',
argstr='-signed',
xor=_xor_format)
format_unsigned = traits.Bool(
desc='Write unsigned integer data (default).',
argstr='-unsigned',
xor=_xor_format)
voxel_range = traits.Tuple(
traits.Int, traits.Int, argstr='-range %d %d',
desc='Valid range for output data.',)
max_buffer_size_in_kb = traits.Range(
low=0,
desc='Specify the maximum size of the internal buffers (in kbytes).',
value=0,
usedefault=False,
argstr='-max_buffer_size_in_kb %d')
_xor_check_dimensions = ('check_dimensions', 'no_check_dimensions',)
check_dimensions = traits.Bool(
desc='Check that files have matching dimensions (default).',
argstr='-check_dimensions',
xor=_xor_check_dimensions)
no_check_dimensions = traits.Bool(
desc='Do not check that files have matching dimensions.',
argstr='-nocheck_dimensions',
xor=_xor_check_dimensions)
# FIXME Is it sensible to use ignore_nan and propagate_nan at the same
# time? Document this.
ignore_nan = traits.Bool(
desc='Ignore invalid data (NaN) for accumulations.',
argstr='-ignore_nan')
propagate_nan = traits.Bool(
desc='Invalid data in any file at a voxel produces a NaN (default).',
argstr='-propagate_nan')
# FIXME Double-check that these are mutually exclusive?
_xor_nan_zero_illegal = (
'output_nan',
'output_zero',
'output_illegal_value')
output_nan = traits.Bool(
desc='Output NaN when an illegal operation is done (default).',
argstr='-nan',
xor=_xor_nan_zero_illegal)
output_zero = traits.Bool(
desc='Output zero when an illegal operation is done.',
argstr='-zero',
xor=_xor_nan_zero_illegal)
output_illegal = traits.Bool(
desc='Value to write out when an illegal operation is done. Default value: 1.79769e+308',
argstr='-illegal_value',
xor=_xor_nan_zero_illegal)
_xor_expression = ('expression', 'expfile')
expression = traits.Str(
desc='Expression to use in calculations.',
argstr='-expression \'%s\'',
xor=_xor_expression,
mandatory=True)
expfile = traits.File(
desc='Name of file containing expression.',
argstr='-expfile %s',
xor=_xor_expression,
mandatory=True)
# FIXME test this one, the argstr will probably need tweaking, see
# _format_arg.
    outfiles = traits.List(
        traits.Tuple(
            traits.Str,
            traits.File,
            argstr='-outfile %s %s',
            desc=('List of (symbol, file) tuples indicating that output should be written '
                  'to the specified file, taking values from the symbol which should be '
                  'created in the expression (see the EXAMPLES section). If this option '
                  'is given, then all non-option arguments are taken as input files. '
                  'This option can be used multiple times for multiple output files.')))
eval_width = traits.Int(
200,
desc='Number of voxels to evaluate simultaneously.',
argstr='-eval_width %s',
usedefault=False)
class CalcOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
class Calc(CommandLine):
"""Compute an expression using MINC files as input.
Examples
--------
>>> from nipype.interfaces.minc import Calc
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> file0 = nonempty_minc_data(0)
>>> file1 = nonempty_minc_data(1)
>>> calc = Calc(input_files=[file0, file1], output_file='/tmp/calc.mnc', expression='A[0] + A[1]') # add files together
>>> calc.run() # doctest: +SKIP
"""
input_spec = CalcInputSpec
output_spec = CalcOutputSpec
_cmd = 'minccalc'
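# Hedged sketch (illustration only, not part of the module): minccalc
# expressions refer to the input volumes as A[0], A[1], ...; a hypothetical
# helper to build the mean of n input volumes could look like this.
def _sketch_mean_expression(n_inputs):
    terms = ' + '.join('A[%d]' % i for i in range(n_inputs))
    return '(%s) / %d' % (terms, n_inputs)
# _sketch_mean_expression(3) -> '(A[0] + A[1] + A[2]) / 3'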
# FIXME mincbbox produces output like
#
# -5.000000 -5.000000 -5.000000 4.800000 2.800000 8.800000
#
# so perhaps this would be better returned as a pair of Python
# lists instead of sending to an output file?
class BBoxInputSpec(StdOutCommandLineInputSpec):
input_file = File(
desc='input file',
exists=True,
mandatory=True,
argstr='%s',
position=-2,)
output_file = File(
desc='output file containing bounding box corners',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s_bbox.txt',
keep_extension=False)
threshold = traits.Int(
0,
desc='VIO_Real value threshold for bounding box. Default value: 0.',
argstr='-threshold')
_xor_one_two = ('one_line', 'two_lines')
one_line = traits.Bool(
desc='Output on one line (default): start_x y z width_x y z',
argstr='-one_line',
xor=_xor_one_two)
two_lines = traits.Bool(
desc='Output on two lines: start_x y z \n width_x y z',
argstr='-two_lines',
xor=_xor_one_two)
    format_mincresample = traits.Bool(
        desc='Output format for mincresample: (-step x y z -start x y z -nelements x y z).',
        argstr='-mincresample')
    format_mincreshape = traits.Bool(
        desc='Output format for mincreshape: (-start x,y,z -count dx,dy,dz).',
        argstr='-mincreshape')
    format_minccrop = traits.Bool(
        desc='Output format for minccrop: (-xlim x1 x2 -ylim y1 y2 -zlim z1 z2).',
        argstr='-minccrop')
# FIXME Not implemented, will clash with our parsing of the output?
# Command-specific options:
# Options for logging progress. Default = -verbose.
# -verbose: Write messages indicating progress
# -quiet: Do not write log messages
# -debug: Print out debug info.
class BBoxOutputSpec(TraitedSpec):
output_file = File(
desc='output file containing bounding box corners',
exists=True)
class BBox(StdOutCommandLine):
"""Determine a bounding box of image.
Examples
--------
>>> from nipype.interfaces.minc import BBox
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> file0 = nonempty_minc_data(0)
>>> bbox = BBox(input_file=file0)
>>> bbox.run() # doctest: +SKIP
"""
input_spec = BBoxInputSpec
output_spec = BBoxOutputSpec
_cmd = 'mincbbox'
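# Hedged sketch (illustration only, not part of the module), following the
# FIXME above: parse mincbbox's default one-line output into two Python
# lists instead of keeping it in a text file.
def _sketch_parse_bbox(line):
    values = [float(v) for v in line.split()]
    return values[:3], values[3:]
# _sketch_parse_bbox('-5.000000 -5.000000 -5.000000 4.800000 2.800000 8.800000')
# -> ([-5.0, -5.0, -5.0], [4.8, 2.8, 8.8])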
class BeastInputSpec(CommandLineInputSpec):
"""
TODO:
Command-specific options:
-verbose: Enable verbose output.
-positive: Specify mask of positive segmentation (inside mask) instead of the default mask.
-output_selection: Specify file to output selected files.
-count: Specify file to output the patch count.
    -mask: Specify a segmentation mask instead of the default mask.
-no_mask: Do not apply a segmentation mask. Perform the segmentation over the entire image.
-no_positive: Do not apply a positive mask.
Generic options for all commands:
-help: Print summary of command-line options and abort
-version: Print version number of program and exit
Copyright (C) 2011 Simon Fristed Eskildsen, Vladimir Fonov,
Pierrick Coupe, Jose V. Manjon
This program comes with ABSOLUTELY NO WARRANTY; for details type 'cat COPYING'.
This is free software, and you are welcome to redistribute it under certain
conditions; type 'cat COPYING' for details.
Usage: mincbeast [options] <library dir> <input> <output>
mincbeast -help
Get this example to work?
https://github.com/BIC-MNI/BEaST/blob/master/README.library
2.3 Source the minc-toolkit (if installed):
$ source /opt/minc/minc-toolkit-config.sh
2.4 Generate library by running:
$ beast_prepareADNIlib -flip <ADNI download directory> <BEaST library directory>
Example:
$ sudo beast_prepareADNIlib -flip Downloads/ADNI /opt/minc/share/beast-library-1.1
3. Test the setup
3.1 Normalize your data
$ beast_normalize -modeldir /opt/minc/share/icbm152_model_09c input.mnc normal.mnc normal.xfm
3.2 Run BEaST
$ mincbeast /opt/minc/share/beast-library-1.1 normal.mnc brainmask.mnc -conf /opt/minc/share/beast-library-1.1/default.2mm.conf -same_res
"""
probability_map = traits.Bool(
desc='Output the probability map instead of crisp mask.',
argstr='-probability')
flip_images = traits.Bool(
desc='Flip images around the mid-sagittal plane to increase patch count.',
argstr='-flip')
    load_moments = traits.Bool(
        desc=('Do not calculate moments, instead use precalculated '
              'library moments (for optimization purposes).'),
        argstr='-load_moments')
fill_holes = traits.Bool(
desc='Fill holes in the binary output.',
argstr='-fill')
median_filter = traits.Bool(
desc='Apply a median filter on the probability map.',
argstr='-median')
nlm_filter = traits.Bool(
desc='Apply an NLM filter on the probability map (experimental).',
argstr='-nlm_filter')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
configuration_file = traits.File(
desc='Specify configuration file.',
argstr='-configuration %s')
    voxel_size = traits.Int(
        4,
        desc=('Specify voxel size for calculations (4, 2, or 1). '
              'Default value: 4. Assumes no multiscale. Use configuration '
              'file for multiscale.'),
        argstr='-voxel_size %s')
abspath = traits.Bool(
desc='File paths in the library are absolute (default is relative to library root).',
argstr='-abspath',
usedefault=True,
default_value=True)
patch_size = traits.Int(
1,
desc='Specify patch size for single scale approach. Default value: 1.',
argstr='-patch_size %s')
search_area = traits.Int(
2,
desc='Specify size of search area for single scale approach. Default value: 2.',
argstr='-search_area %s')
confidence_level_alpha = traits.Float(
0.5,
desc='Specify confidence level Alpha. Default value: 0.5',
argstr='-alpha %s')
    smoothness_factor_beta = traits.Float(
        0.5,
        desc='Specify smoothness factor Beta. Default value: 0.5.',
        argstr='-beta %s')
threshold_patch_selection = traits.Float(
0.95,
desc='Specify threshold for patch selection. Default value: 0.95',
argstr='-threshold %s')
number_selected_images = traits.Int(
20,
desc='Specify number of selected images. Default value: 20',
argstr='-selection_num %s')
same_resolution = traits.Bool(
desc='Output final mask with the same resolution as input file.',
argstr='-same_resolution')
library_dir = traits.Directory(
desc='library directory',
position=-3,
argstr='%s',
mandatory=True)
input_file = traits.File(
desc='input file',
position=-2,
argstr='%s',
mandatory=True)
output_file = traits.File(
desc='output file',
position=-1,
argstr='%s',
name_source=['input_file'],
hash_files=False,
name_template='%s_beast_mask.mnc')
class BeastOutputSpec(TraitedSpec):
output_file = File(desc='output mask file', exists=True)
class Beast(CommandLine):
"""Extract brain image using BEaST (Brain Extraction using
non-local Segmentation Technique).
Examples
--------
>>> from nipype.interfaces.minc import Beast
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> file0 = nonempty_minc_data(0)
>>> beast = Beast(input_file=file0)
    >>> beast.run() # doctest: +SKIP
"""
input_spec = BeastInputSpec
output_spec = BeastOutputSpec
_cmd = 'mincbeast'
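# Hedged sketch (illustration only, not part of the module): the shell
# workflow from the BeastInputSpec docstring above collected into one
# hypothetical helper; the paths are assumptions taken from that docstring.
def _sketch_beast_commands(library_dir='/opt/minc/share/beast-library-1.1',
                           model_dir='/opt/minc/share/icbm152_model_09c'):
    return [
        'beast_normalize -modeldir %s input.mnc normal.mnc normal.xfm' % model_dir,
        'mincbeast %s normal.mnc brainmask.mnc -conf %s/default.2mm.conf -same_res'
        % (library_dir, library_dir),
    ]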
class PikInputSpec(CommandLineInputSpec):
input_file = File(
desc='input file',
exists=True,
mandatory=True,
argstr='%s',
position=-2,)
_xor_image_type = ('jpg', 'png')
jpg = traits.Bool(desc='Output a jpg file.', xor=_xor_image_type)
png = traits.Bool(desc='Output a png file (default).', xor=_xor_image_type)
output_file = File(
desc='output file',
argstr='%s',
genfile=True,
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s.png',
keep_extension=False)
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
# FIXME not implemented: --verbose
# --fake
# --lookup ==> arguments to pass to minclookup
    scale = traits.Int(
        2,
        desc=('Scaling factor for resulting image. By default images are '
              'output at twice their original resolution.'),
        argstr='--scale %s')
width = traits.Int(
desc='Autoscale the resulting image to have a fixed image width (in pixels).',
argstr='--width %s')
depth = traits.Enum(
8,
16,
desc='Bitdepth for resulting image 8 or 16 (MSB machines only!)',
argstr='--depth %s')
_xor_title = ('title_string', 'title_with_filename')
title = traits.Either(
traits.Bool(desc='Use input filename as title in resulting image.'),
traits.Str(desc='Add a title to the resulting image.'),
argstr='%s') # see _format_arg for actual arg string
title_size = traits.Int(
desc='Font point size for the title.',
argstr='--title_size %s',
requires=['title'])
annotated_bar = traits.Bool(
desc='create an annotated bar to match the image (use height of the output image)',
argstr='--anot_bar')
# FIXME tuple of floats? Not voxel values? Man page doesn't specify.
minc_range = traits.Tuple(
traits.Float, traits.Float,
desc='Valid range of values for MINC file.',
argstr='--range %s %s')
_xor_image_range = ('image_range', 'auto_range')
image_range = traits.Tuple(
traits.Float, traits.Float,
desc='Range of image values to use for pixel intensity.',
argstr='--image_range %s %s',
xor=_xor_image_range)
    auto_range = traits.Bool(
        desc='Automatically determine image range using the 5th and 95th percentiles of the histogram.',
        argstr='--auto_range',
        xor=_xor_image_range)
start = traits.Int(
desc='Slice number to get. (note this is in voxel co-ordinates).',
argstr='--slice %s') # FIXME Int is correct?
_xor_slice = ('slice_z', 'slice_y', 'slice_x')
slice_z = traits.Bool(
desc='Get an axial/transverse (z) slice.',
argstr='-z',
xor=_xor_slice)
slice_y = traits.Bool(
desc='Get a coronal (y) slice.',
argstr='-y',
xor=_xor_slice)
slice_x = traits.Bool(
desc='Get a sagittal (x) slice.',
argstr='-x',
xor=_xor_slice) # FIXME typo in man page? sagital?
triplanar = traits.Bool(
desc='Create a triplanar view of the input file.',
argstr='--triplanar')
tile_size = traits.Int(
desc='Pixel size for each image in a triplanar.',
argstr='--tilesize %s')
_xor_sagittal_offset = ('sagittal_offset', 'sagittal_offset_perc')
sagittal_offset = traits.Int(
desc='Offset the sagittal slice from the centre.',
argstr='--sagittal_offset %s')
sagittal_offset_perc = traits.Range(
low=0,
high=100,
desc='Offset the sagittal slice by a percentage from the centre.',
argstr='--sagittal_offset_perc %d',
)
_xor_vertical_horizontal = (
'vertical_triplanar_view',
'horizontal_triplanar_view')
vertical_triplanar_view = traits.Bool(
desc='Create a vertical triplanar view (Default).',
argstr='--vertical',
xor=_xor_vertical_horizontal)
horizontal_triplanar_view = traits.Bool(
desc='Create a horizontal triplanar view.',
argstr='--horizontal',
xor=_xor_vertical_horizontal)
lookup = traits.Str(
desc='Arguments to pass to minclookup',
argstr='--lookup %s')
class PikOutputSpec(TraitedSpec):
output_file = File(desc='output image', exists=True)
class Pik(CommandLine):
"""Generate images from minc files.
Mincpik uses Imagemagick to generate images
from Minc files.
Examples
--------
>>> from nipype.interfaces.minc import Pik
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> file0 = nonempty_minc_data(0)
>>> pik = Pik(input_file=file0, title='foo')
    >>> pik.run() # doctest: +SKIP
"""
input_spec = PikInputSpec
output_spec = PikOutputSpec
_cmd = 'mincpik'
def _format_arg(self, name, spec, value):
if name == 'title':
if isinstance(value, bool) and value:
return '--title'
elif isinstance(value, str):
return '--title --title_text %s' % (value,)
else:
raise ValueError(
'Unknown value for "title" argument: ' + str(value))
return super(Pik, self)._format_arg(name, spec, value)
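# Hedged sketch (illustration only, not part of the module): a pure-Python
# restatement of the 'title' mapping implemented in Pik._format_arg above;
# True uses the input filename, a string supplies the title text.
def _sketch_pik_title(value):
    if value is True:
        return '--title'
    return '--title --title_text %s' % value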
class BlurInputSpec(CommandLineInputSpec):
input_file = File(
desc='input file',
exists=True,
mandatory=True,
argstr='%s',
position=-2,)
output_file_base = File(
desc='output file base',
argstr='%s',
position=-1)
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
_xor_kernel = ('gaussian', 'rect')
gaussian = traits.Bool(
desc='Use a gaussian smoothing kernel (default).',
argstr='-gaussian',
xor=_xor_kernel)
rect = traits.Bool(
desc='Use a rect (box) smoothing kernel.',
argstr='-rect',
xor=_xor_kernel)
gradient = traits.Bool(
desc='Create the gradient magnitude volume as well.',
argstr='-gradient')
partial = traits.Bool(
desc='Create the partial derivative and gradient magnitude volumes as well.',
argstr='-partial')
no_apodize = traits.Bool(
desc='Do not apodize the data before blurring.',
argstr='-no_apodize')
_xor_main_options = ('fwhm', 'fwhm3d', 'standard_dev')
fwhm = traits.Float(
0,
desc='Full-width-half-maximum of gaussian kernel. Default value: 0.',
argstr='-fwhm %s',
xor=_xor_main_options,
mandatory=True)
standard_dev = traits.Float(
0,
desc='Standard deviation of gaussian kernel. Default value: 0.',
argstr='-standarddev %s',
xor=_xor_main_options,
mandatory=True)
fwhm3d = traits.Tuple(
traits.Float,
traits.Float,
traits.Float,
argstr='-3dfwhm %s %s %s',
        desc=('Full-width-half-maximum of gaussian kernel. '
'Default value: -1.79769e+308 -1.79769e+308 -1.79769e+308.'),
xor=_xor_main_options,
mandatory=True)
dimensions = traits.Enum(
1,
2,
3,
desc='Number of dimensions to blur (either 1,2 or 3). Default value: 3.',
argstr='-dimensions %s',
default=3)
class BlurOutputSpec(TraitedSpec):
output_file = File(desc='Blurred output file.', exists=True)
gradient_dxyz = File(desc='Gradient dxyz.')
partial_dx = File(desc='Partial gradient dx.')
partial_dy = File(desc='Partial gradient dy.')
partial_dz = File(desc='Partial gradient dz.')
partial_dxyz = File(desc='Partial gradient dxyz.')
class Blur(StdOutCommandLine):
"""
Convolve an input volume with a Gaussian blurring kernel of
user-defined width. Optionally, the first partial derivatives
and the gradient magnitude volume can be calculated.
Examples
--------
>>> from nipype.interfaces.minc import Blur
>>> from nipype.interfaces.minc.testdata import minc3Dfile
(1) Blur an input volume with a 6mm fwhm isotropic Gaussian
blurring kernel:
>>> blur = Blur(input_file=minc3Dfile, fwhm=6, output_file_base='/tmp/out_6')
>>> blur.run() # doctest: +SKIP
mincblur will create /tmp/out_6_blur.mnc.
(2) Calculate the blurred and gradient magnitude data:
>>> blur = Blur(input_file=minc3Dfile, fwhm=6, gradient=True, output_file_base='/tmp/out_6')
>>> blur.run() # doctest: +SKIP
will create /tmp/out_6_blur.mnc and /tmp/out_6_dxyz.mnc.
(3) Calculate the blurred data, the partial derivative volumes
and the gradient magnitude for the same data:
>>> blur = Blur(input_file=minc3Dfile, fwhm=6, partial=True, output_file_base='/tmp/out_6')
>>> blur.run() # doctest: +SKIP
will create /tmp/out_6_blur.mnc, /tmp/out_6_dx.mnc,
/tmp/out_6_dy.mnc, /tmp/out_6_dz.mnc and /tmp/out_6_dxyz.mnc.
"""
input_spec = BlurInputSpec
output_spec = BlurOutputSpec
_cmd = 'mincblur'
def _gen_output_base(self):
output_file_base = self.inputs.output_file_base
if isdefined(output_file_base):
return output_file_base
else:
base_file_name = os.path.split(self.inputs.input_file)[
1] # e.g. 'foo.mnc'
base_file_name_no_ext = os.path.splitext(
base_file_name)[0] # e.g. 'foo'
output_base = os.path.join(
os.getcwd(),
base_file_name_no_ext +
'_bluroutput') # e.g. '/tmp/blah/foo_bluroutput'
# return os.path.splitext(self.inputs.input_file)[0] +
# '_bluroutput'
return output_base
def _list_outputs(self):
outputs = self.output_spec().get()
output_file_base = self._gen_output_base()
outputs['output_file'] = output_file_base + '_blur.mnc'
if isdefined(self.inputs.gradient):
outputs['gradient_dxyz'] = output_file_base + '_dxyz.mnc'
if isdefined(self.inputs.partial):
outputs['partial_dx'] = output_file_base + '_dx.mnc'
outputs['partial_dy'] = output_file_base + '_dy.mnc'
outputs['partial_dz'] = output_file_base + '_dz.mnc'
outputs['partial_dxyz'] = output_file_base + '_dxyz.mnc'
return outputs
@property
def cmdline(self):
output_file_base = self.inputs.output_file_base
if isdefined(output_file_base):
return super(Blur, self).cmdline
else:
# FIXME this seems like a bit of a hack. Can we force output_file
# to show up in cmdline by default, even if it isn't specified in
# the instantiation of Pik?
return '%s %s' % (super(Blur, self).cmdline,
self._gen_output_base())
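# Hedged sketch (illustration only, not part of the module): the output
# file names that Blur._list_outputs above derives from a given base.
def _sketch_blur_outputs(base, gradient=False, partial=False):
    outputs = {'output_file': base + '_blur.mnc'}
    if gradient:
        outputs['gradient_dxyz'] = base + '_dxyz.mnc'
    if partial:
        for suffix in ('_dx', '_dy', '_dz', '_dxyz'):
            outputs['partial' + suffix] = base + suffix + '.mnc'
    return outputs
# _sketch_blur_outputs('/tmp/out_6', gradient=True)
# -> {'output_file': '/tmp/out_6_blur.mnc', 'gradient_dxyz': '/tmp/out_6_dxyz.mnc'}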
class MathInputSpec(CommandLineInputSpec):
_xor_input_files = ('input_files', 'filelist')
input_files = InputMultiPath(
traits.File(exists=True),
desc='input file(s) for calculation',
mandatory=True,
sep=' ',
argstr='%s',
position=-2,
xor=_xor_input_files)
output_file = File(
desc='output file',
argstr='%s',
genfile=True,
position=-1,
name_source=['input_files'],
hash_files=False,
name_template='%s_mincmath.mnc')
filelist = traits.File(
desc='Specify the name of a file containing input file names.',
argstr='-filelist %s',
exists=True,
mandatory=True,
xor=_xor_input_files)
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
two = traits.Bool(desc='Create a MINC 2 output file.', argstr='-2')
_xor_copy_header = ('copy_header', 'no_copy_header')
copy_header = traits.Bool(
desc='Copy all of the header from the first file (default for one file).',
argstr='-copy_header',
xor=_xor_copy_header)
no_copy_header = traits.Bool(
        desc='Do not copy all of the header from the first file (default for many files).',
argstr='-nocopy_header',
xor=_xor_copy_header)
_xor_format = (
'format_filetype',
'format_byte',
'format_short',
'format_int',
'format_long',
'format_float',
'format_double',
'format_signed',
'format_unsigned',
)
format_filetype = traits.Bool(
desc='Use data type of first file (default).',
argstr='-filetype',
xor=_xor_format)
format_byte = traits.Bool(
desc='Write out byte data.',
argstr='-byte',
xor=_xor_format)
format_short = traits.Bool(
desc='Write out short integer data.',
argstr='-short',
xor=_xor_format)
format_int = traits.Bool(
desc='Write out 32-bit integer data.',
argstr='-int',
xor=_xor_format)
format_long = traits.Bool(
desc='Superseded by -int.',
argstr='-long',
xor=_xor_format)
format_float = traits.Bool(
desc='Write out single-precision floating-point data.',
argstr='-float',
xor=_xor_format)
format_double = traits.Bool(
desc='Write out double-precision floating-point data.',
argstr='-double',
xor=_xor_format)
format_signed = traits.Bool(
desc='Write signed integer data.',
argstr='-signed',
xor=_xor_format)
format_unsigned = traits.Bool(
desc='Write unsigned integer data (default).',
argstr='-unsigned',
xor=_xor_format)
voxel_range = traits.Tuple(
traits.Int, traits.Int, argstr='-range %d %d',
desc='Valid range for output data.')
max_buffer_size_in_kb = traits.Range(
low=0,
desc='Specify the maximum size of the internal buffers (in kbytes).',
value=4096,
argstr='-max_buffer_size_in_kb %d',)
_xor_check_dimensions = ('check_dimensions', 'no_check_dimensions',)
check_dimensions = traits.Bool(
desc='Check that dimension info matches across files (default).',
argstr='-check_dimensions',
xor=_xor_check_dimensions)
no_check_dimensions = traits.Bool(
desc='Do not check dimension info.',
argstr='-nocheck_dimensions',
xor=_xor_check_dimensions)
dimension = traits.Str(
desc='Specify a dimension along which we wish to perform a calculation.',
argstr='-dimension %s')
# FIXME Is it sensible to use ignore_nan and propagate_nan at the same
# time? Document this.
ignore_nan = traits.Bool(
desc='Ignore invalid data (NaN) for accumulations.',
argstr='-ignore_nan')
propagate_nan = traits.Bool(
desc='Invalid data in any file at a voxel produces a NaN (default).',
argstr='-propagate_nan')
# FIXME Double-check that these are mutually exclusive?
_xor_nan_zero_illegal = (
'output_nan',
'output_zero',
        'output_illegal')
output_nan = traits.Bool(
desc='Output NaN when an illegal operation is done (default).',
argstr='-nan',
xor=_xor_nan_zero_illegal)
output_zero = traits.Bool(
desc='Output zero when an illegal operation is done.',
argstr='-zero',
xor=_xor_nan_zero_illegal)
output_illegal = traits.Bool(
        desc=('Value to write out when an illegal operation '
              'is done. Default value: 1.79769e+308'),
argstr='-illegal_value',
xor=_xor_nan_zero_illegal)
# FIXME A whole bunch of the parameters will be mutually exclusive, e.g. surely can't do sqrt and abs at the same time?
# Or does mincmath do one and then the next?
    ##########################################################################
    # Traits that expect a bool (compare two volumes) or a constant          #
    # (manipulate one volume).                                               #
    ##########################################################################
bool_or_const_traits = [
'test_gt',
'test_lt',
'test_eq',
'test_ne',
'test_ge',
'test_le',
'calc_add',
'calc_sub',
'calc_mul',
'calc_div']
test_gt = traits.Either(
traits.Bool(),
traits.Float(),
desc='Test for vol1 > vol2 or vol1 > constant.',
argstr='-gt')
test_lt = traits.Either(
traits.Bool(),
traits.Float(),
desc='Test for vol1 < vol2 or vol1 < constant.',
argstr='-lt')
test_eq = traits.Either(
traits.Bool(),
traits.Float(),
desc='Test for integer vol1 == vol2 or vol1 == constant.',
argstr='-eq')
test_ne = traits.Either(
traits.Bool(),
traits.Float(),
desc='Test for integer vol1 != vol2 or vol1 != const.',
argstr='-ne')
test_ge = traits.Either(
traits.Bool(),
traits.Float(),
desc='Test for vol1 >= vol2 or vol1 >= const.',
argstr='-ge')
test_le = traits.Either(
traits.Bool(),
traits.Float(),
desc='Test for vol1 <= vol2 or vol1 <= const.',
argstr='-le')
calc_add = traits.Either(
traits.Bool(),
traits.Float(),
desc='Add N volumes or volume + constant.',
argstr='-add')
calc_sub = traits.Either(
traits.Bool(),
traits.Float(),
desc='Subtract 2 volumes or volume - constant.',
argstr='-sub')
calc_mul = traits.Either(
traits.Bool(),
traits.Float(),
desc='Multiply N volumes or volume * constant.',
argstr='-mult')
calc_div = traits.Either(
traits.Bool(),
traits.Float(),
desc='Divide 2 volumes or volume / constant.',
argstr='-div')
######################################
# Traits that expect a single volume #
######################################
single_volume_traits = [
'invert',
'calc_not',
'sqrt',
'square',
'abs',
'exp',
'log',
'scale',
'clamp',
'segment',
'nsegment',
'isnan',
        'nisnan']  # FIXME enforce this in _parse_inputs and check for other members
invert = traits.Either(
traits.Float(),
desc='Calculate 1/c.',
argstr='-invert -const %s')
calc_not = traits.Bool(desc='Calculate !vol1.', argstr='-not')
sqrt = traits.Bool(desc='Take square root of a volume.', argstr='-sqrt')
square = traits.Bool(desc='Take square of a volume.', argstr='-square')
abs = traits.Bool(desc='Take absolute value of a volume.', argstr='-abs')
exp = traits.Tuple(
traits.Float, traits.Float, argstr='-exp -const2 %s %s',
desc='Calculate c2*exp(c1*x). Both constants must be specified.')
log = traits.Tuple(
traits.Float, traits.Float, argstr='-log -const2 %s %s',
desc='Calculate log(x/c2)/c1. The constants c1 and c2 default to 1.')
scale = traits.Tuple(
traits.Float, traits.Float, argstr='-scale -const2 %s %s',
desc='Scale a volume: volume * c1 + c2.')
clamp = traits.Tuple(
traits.Float, traits.Float, argstr='-clamp -const2 %s %s',
desc='Clamp a volume to lie between two values.')
segment = traits.Tuple(
traits.Float, traits.Float, argstr='-segment -const2 %s %s',
desc='Segment a volume using range of -const2: within range = 1, outside range = 0.')
nsegment = traits.Tuple(
traits.Float, traits.Float, argstr='-nsegment -const2 %s %s',
desc='Opposite of -segment: within range = 0, outside range = 1.')
isnan = traits.Bool(desc='Test for NaN values in vol1.', argstr='-isnan')
nisnan = traits.Bool(desc='Negation of -isnan.', argstr='-nisnan')
############################################
# Traits that expect precisely two volumes #
############################################
two_volume_traits = ['percentdiff']
percentdiff = traits.Float(
desc='Percent difference between 2 volumes, thresholded (const def=0.0).',
argstr='-percentdiff')
#####################################
# Traits that expect N >= 1 volumes #
#####################################
n_volume_traits = [
'count_valid',
'maximum',
'minimum',
'calc_add',
'calc_or']
count_valid = traits.Bool(
desc='Count the number of valid values in N volumes.',
argstr='-count_valid')
maximum = traits.Bool(desc='Find maximum of N volumes.', argstr='-maximum')
minimum = traits.Bool(desc='Find minimum of N volumes.', argstr='-minimum')
calc_and = traits.Bool(
desc='Calculate vol1 && vol2 (&& ...).',
argstr='-and')
calc_or = traits.Bool(
desc='Calculate vol1 || vol2 (|| ...).',
argstr='-or')
class MathOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
class Math(StdOutCommandLine):
"""
Various mathematical operations supplied by mincmath.
Examples
--------
>>> from nipype.interfaces.minc import Math
>>> from nipype.interfaces.minc.testdata import minc2Dfile
Scale: volume*3.0 + 2:
>>> scale = Math(input_files=[minc2Dfile], scale=(3.0, 2))
>>> scale.run() # doctest: +SKIP
Test if >= 1.5:
>>> gt = Math(input_files=[minc2Dfile], test_gt=1.5)
>>> gt.run() # doctest: +SKIP
"""
input_spec = MathInputSpec
output_spec = MathOutputSpec
_cmd = 'mincmath'
def _format_arg(self, name, spec, value):
assert value is not None
if name in self.input_spec.bool_or_const_traits:
            # These traits may be given either as a bool (operate on two
            # input volumes) or as a float constant (operate on one volume).
if isinstance(value, bool) and value:
return spec.argstr
elif isinstance(value, bool) and not value:
raise ValueError(
'Does not make sense to specify %s=False' %
(name,))
elif isinstance(value, float):
return '%s -const %s' % (spec.argstr, value,)
else:
raise ValueError('Invalid %s argument: %s' % (name, value,))
return super(Math, self)._format_arg(name, spec, value)
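    # For illustration only (file names are hypothetical): test_gt=True
    # renders as '-gt' and compares two input volumes, while test_gt=1.5
    # renders as '-gt -const 1.5' and compares one volume to a constant.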
def _parse_inputs(self):
"""A number of the command line options expect precisely one or two files.
"""
nr_input_files = len(self.inputs.input_files)
for n in self.input_spec.bool_or_const_traits:
t = self.inputs.__getattribute__(n)
if isdefined(t):
if isinstance(t, bool):
if nr_input_files != 2:
raise ValueError(
'Due to the %s option we expected 2 files but input_files is of length %d' %
(n, nr_input_files,))
elif isinstance(t, float):
if nr_input_files != 1:
raise ValueError(
'Due to the %s option we expected 1 file but input_files is of length %d' %
(n, nr_input_files,))
else:
raise ValueError(
'Argument should be a bool or const, but got: %s' %
t)
for n in self.input_spec.single_volume_traits:
t = self.inputs.__getattribute__(n)
if isdefined(t):
if nr_input_files != 1:
raise ValueError(
'Due to the %s option we expected 1 file but input_files is of length %d' %
(n, nr_input_files,))
for n in self.input_spec.two_volume_traits:
t = self.inputs.__getattribute__(n)
if isdefined(t):
if nr_input_files != 2:
raise ValueError(
'Due to the %s option we expected 2 files but input_files is of length %d' %
(n, nr_input_files,))
for n in self.input_spec.n_volume_traits:
t = self.inputs.__getattribute__(n)
if isdefined(t):
if not nr_input_files >= 1:
raise ValueError(
'Due to the %s option we expected at least one file but input_files is of length %d' %
(n, nr_input_files,))
return super(Math, self)._parse_inputs()
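# A rough sketch of the validation above (file names are hypothetical):
# Math(input_files=['a.mnc', 'b.mnc'], test_gt=1.5) raises ValueError at
# _parse_inputs time, since a constant comparison expects exactly one input
# file, whereas Math(input_files=['a.mnc', 'b.mnc'], test_gt=True) passes.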
class ResampleInputSpec(CommandLineInputSpec):
"""
not implemented:
    -size: synonym for -nelements
    -xsize: synonym for -xnelements
    -ysize: synonym for -ynelements
    -zsize: synonym for -znelements
"""
input_file = File(
desc='input file for resampling',
exists=True,
mandatory=True,
argstr='%s',
position=-2,)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s_resample.mnc')
# This is a dummy input.
input_grid_files = InputMultiPath(
traits.File,
desc='input grid file(s)',)
two = traits.Bool(desc='Create a MINC 2 output file.', argstr='-2')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
_xor_interpolation = (
'trilinear_interpolation',
'tricubic_interpolation',
'nearest_neighbour_interpolation',
'sinc_interpolation')
trilinear_interpolation = traits.Bool(
desc='Do trilinear interpolation.',
argstr='-trilinear',
xor=_xor_interpolation)
tricubic_interpolation = traits.Bool(
desc='Do tricubic interpolation.',
argstr='-tricubic',
xor=_xor_interpolation)
nearest_neighbour_interpolation = traits.Bool(
desc='Do nearest neighbour interpolation.',
argstr='-nearest_neighbour',
xor=_xor_interpolation)
sinc_interpolation = traits.Bool(
desc='Do windowed sinc interpolation.',
argstr='-sinc',
xor=_xor_interpolation)
half_width_sinc_window = traits.Enum(
5,
1,
2,
3,
4,
6,
7,
8,
9,
10,
desc='Set half-width of sinc window (1-10). Default value: 5.',
argstr='-width %s',
requires=['sinc_interpolation'])
_xor_sinc_window_type = ('sinc_window_hanning', 'sinc_window_hamming')
sinc_window_hanning = traits.Bool(
desc='Set sinc window type to Hanning.',
argstr='-hanning',
xor=_xor_sinc_window_type,
requires=['sinc_interpolation'])
sinc_window_hamming = traits.Bool(
desc='Set sinc window type to Hamming.',
argstr='-hamming',
xor=_xor_sinc_window_type,
requires=['sinc_interpolation'])
transformation = traits.File(
desc='File giving world transformation. (Default = identity).',
exists=True,
argstr='-transformation %s')
invert_transformation = traits.Bool(
desc='Invert the transformation before using it.',
argstr='-invert_transformation')
_xor_input_sampling = ('vio_transform', 'no_input_sampling')
vio_transform = traits.Bool(
desc='VIO_Transform the input sampling with the transform (default).',
argstr='-tfm_input_sampling',
xor=_xor_input_sampling)
no_input_sampling = traits.Bool(
desc='Use the input sampling without transforming (old behaviour).',
argstr='-use_input_sampling',
xor=_xor_input_sampling)
like = traits.File(
desc='Specifies a model file for the resampling.',
argstr='-like %s',
exists=True)
_xor_format = (
'format_byte',
'format_short',
'format_int',
'format_long',
'format_float',
'format_double',
'format_signed',
'format_unsigned',
)
format_byte = traits.Bool(
desc='Write out byte data.',
argstr='-byte',
xor=_xor_format)
format_short = traits.Bool(
desc='Write out short integer data.',
argstr='-short',
xor=_xor_format)
format_int = traits.Bool(
desc='Write out 32-bit integer data.',
argstr='-int',
xor=_xor_format)
format_long = traits.Bool(
desc='Superseded by -int.',
argstr='-long',
xor=_xor_format)
format_float = traits.Bool(
desc='Write out single-precision floating-point data.',
argstr='-float',
xor=_xor_format)
format_double = traits.Bool(
desc='Write out double-precision floating-point data.',
argstr='-double',
xor=_xor_format)
format_signed = traits.Bool(
desc='Write signed integer data.',
argstr='-signed',
xor=_xor_format)
format_unsigned = traits.Bool(
desc='Write unsigned integer data (default).',
argstr='-unsigned',
xor=_xor_format)
output_range = traits.Tuple(
traits.Float, traits.Float, argstr='-range %s %s',
desc='Valid range for output data. Default value: -1.79769e+308 -1.79769e+308.')
    _xor_slices = ('transverse_slices', 'sagittal_slices', 'coronal_slices')
transverse_slices = traits.Bool(
desc='Write out transverse slices.',
argstr='-transverse',
xor=_xor_slices)
sagittal_slices = traits.Bool(
        desc='Write out sagittal slices.',
argstr='-sagittal',
xor=_xor_slices)
coronal_slices = traits.Bool(
        desc='Write out coronal slices.',
argstr='-coronal',
xor=_xor_slices)
    _xor_fill = ('no_fill', 'fill')
no_fill = traits.Bool(
desc='Use value zero for points outside of input volume.',
argstr='-nofill',
xor=_xor_fill)
fill = traits.Bool(
desc='Use a fill value for points outside of input volume.',
argstr='-fill',
xor=_xor_fill)
fill_value = traits.Float(
        desc=('Specify a fill value for points outside of input volume. '
              'Default value: 1.79769e+308.'),
argstr='-fillvalue %s',
requires=['fill'])
_xor_scale = ('keep_real_range', 'nokeep_real_range')
keep_real_range = traits.Bool(
desc='Keep the real scale of the input volume.',
argstr='-keep_real_range',
xor=_xor_scale)
nokeep_real_range = traits.Bool(
desc='Do not keep the real scale of the data (default).',
argstr='-nokeep_real_range',
xor=_xor_scale)
_xor_spacetype = ('spacetype', 'talairach')
spacetype = traits.Str(
desc='Set the spacetype attribute to a specified string.',
argstr='-spacetype %s')
talairach = traits.Bool(
desc='Output is in Talairach space.',
argstr='-talairach')
origin = traits.Tuple(
traits.Float, traits.Float, traits.Float,
        desc=('Origin of first pixel in 3D space. '
              'Default value: 1.79769e+308 1.79769e+308 1.79769e+308.'),
argstr='-origin %s %s %s')
standard_sampling = traits.Bool(
desc='Set the sampling to standard values (step, start and dircos).',
argstr='-standard_sampling') # FIXME Bool?
units = traits.Str(
desc='Specify the units of the output sampling.',
argstr='-units %s') # FIXME String?
# Elements along each dimension.
# FIXME Ints? Ranges?
# FIXME Check that this xor behaves correctly.
_xor_nelements = ('nelements', 'nelements_x_y_or_z')
# nr elements along each dimension
nelements = traits.Tuple(
traits.Int,
traits.Int,
traits.Int,
desc='Number of elements along each dimension (X, Y, Z).',
argstr='-nelements %s %s %s',
xor=_xor_nelements)
# FIXME Is mincresample happy if we only specify one of these, or do we
# need the requires=...?
xnelements = traits.Int(
desc='Number of elements along the X dimension.',
argstr='-xnelements %s',
requires=('ynelements', 'znelements'),
xor=_xor_nelements)
ynelements = traits.Int(desc='Number of elements along the Y dimension.',
argstr='-ynelements %s',
requires=('xnelements', 'znelements'),
xor=_xor_nelements)
znelements = traits.Int(desc='Number of elements along the Z dimension.',
argstr='-znelements %s',
requires=('xnelements', 'ynelements'),
xor=_xor_nelements)
# step size along each dimension
_xor_step = ('step', 'step_x_y_or_z')
step = traits.Tuple(
traits.Int,
traits.Int,
traits.Int,
desc='Step size along each dimension (X, Y, Z). Default value: (0, 0, 0).',
argstr='-step %s %s %s',
        xor=_xor_step)
# FIXME Use the requires=...?
xstep = traits.Int(
desc='Step size along the X dimension. Default value: 0.',
argstr='-xstep %s',
requires=('ystep', 'zstep'),
xor=_xor_step)
ystep = traits.Int(
desc='Step size along the Y dimension. Default value: 0.',
argstr='-ystep %s',
requires=(
'xstep',
'zstep'),
xor=_xor_step)
zstep = traits.Int(
desc='Step size along the Z dimension. Default value: 0.',
argstr='-zstep %s',
requires=(
'xstep',
'ystep'),
xor=_xor_step)
# start point along each dimension
_xor_start = ('start', 'start_x_y_or_z')
start = traits.Tuple(
traits.Float,
traits.Float,
traits.Float,
        desc=('Start point along each dimension (X, Y, Z). '
              'Default value: 1.79769e+308 1.79769e+308 1.79769e+308.'),
argstr='-start %s %s %s',
        xor=_xor_start)
# FIXME Use the requires=...?
xstart = traits.Float(
desc='Start point along the X dimension. Default value: 1.79769e+308.',
argstr='-xstart %s',
requires=('ystart', 'zstart'),
xor=_xor_start)
ystart = traits.Float(
desc='Start point along the Y dimension. Default value: 1.79769e+308.',
argstr='-ystart %s',
requires=(
'xstart',
'zstart'),
xor=_xor_start)
zstart = traits.Float(
desc='Start point along the Z dimension. Default value: 1.79769e+308.',
argstr='-zstart %s',
requires=(
'xstart',
'ystart'),
xor=_xor_start)
# dircos along each dimension
_xor_dircos = ('dircos', 'dircos_x_y_or_z')
dircos = traits.Tuple(
traits.Float,
traits.Float,
traits.Float,
        desc=('Direction cosines along each dimension (X, Y, Z). Default value: '
              '1.79769e+308 1.79769e+308 1.79769e+308 1.79769e+308 ... '
              '1.79769e+308 1.79769e+308 1.79769e+308 1.79769e+308 1.79769e+308.'),
argstr='-dircos %s %s %s',
        xor=_xor_dircos)
# FIXME Use the requires=...?
xdircos = traits.Float(
        desc=('Direction cosines along the X dimension. '
              'Default value: 1.79769e+308 1.79769e+308 1.79769e+308.'),
argstr='-xdircos %s',
requires=(
'ydircos',
'zdircos'),
xor=_xor_dircos)
ydircos = traits.Float(
        desc=('Direction cosines along the Y dimension. '
              'Default value: 1.79769e+308 1.79769e+308 1.79769e+308.'),
argstr='-ydircos %s',
requires=(
'xdircos',
'zdircos'),
xor=_xor_dircos)
zdircos = traits.Float(
        desc=('Direction cosines along the Z dimension. '
              'Default value: 1.79769e+308 1.79769e+308 1.79769e+308.'),
argstr='-zdircos %s',
requires=(
'xdircos',
'ydircos'),
xor=_xor_dircos)
class ResampleOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
class Resample(StdOutCommandLine):
"""
    Resample a minc file.
Examples
--------
>>> from nipype.interfaces.minc import Resample
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> r = Resample(input_file=minc2Dfile, output_file='/tmp/out.mnc') # Resample the file.
>>> r.run() # doctest: +SKIP
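    A sketch of resampling onto another volume's sampling grid via -like
    (here the same file, purely for illustration):
    >>> r2 = Resample(input_file=minc2Dfile, like=minc2Dfile, output_file='/tmp/out_like.mnc')
    >>> r2.run() # doctest: +SKIP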
"""
input_spec = ResampleInputSpec
output_spec = ResampleOutputSpec
_cmd = 'mincresample'
class NormInputSpec(CommandLineInputSpec):
"""
Not implemented:
-version print version and exit
-verbose be verbose
-noverbose opposite of -verbose [default]
-quiet be quiet
-noquiet opposite of -quiet [default]
-fake do a dry run, (echo cmds only)
-nofake opposite of -fake [default]
"""
input_file = File(
desc='input file to normalise',
exists=True,
mandatory=True,
argstr='%s',
position=-2,)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s_norm.mnc')
output_threshold_mask = traits.File(
desc='File in which to store the threshold mask.',
argstr='-threshold_mask %s',
name_source=['input_file'],
hash_files=False,
name_template='%s_norm_threshold_mask.mnc')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
# Normalisation Options
mask = traits.File(
desc='Calculate the image normalisation within a mask.',
argstr='-mask %s',
exists=True)
clamp = traits.Bool(
        desc='Force the output range between limits [default].',
argstr='-clamp',
usedefault=True,
default_value=True)
cutoff = traits.Range(
low=0.0,
high=100.0,
desc='Cutoff value to use to calculate thresholds by a histogram PcT in %. [default: 0.01]',
argstr='-cutoff %s',
)
lower = traits.Float(desc='Lower real value to use.', argstr='-lower %s')
upper = traits.Float(desc='Upper real value to use.', argstr='-upper %s')
    out_floor = traits.Float(
        desc='Output file minimum (floor) [default: 0].',
        argstr='-out_floor %s')  # FIXME is this a float?
    out_ceil = traits.Float(
        desc='Output file maximum (ceiling) [default: 100].',
        argstr='-out_ceil %s')  # FIXME is this a float?
# Threshold Options
threshold = traits.Bool(
desc='Threshold the image (set values below threshold_perc to -out_floor).',
argstr='-threshold')
threshold_perc = traits.Range(
low=0.0,
high=100.0,
desc='Threshold percentage (0.1 == lower 10% of intensity range) [default: 0.1].',
argstr='-threshold_perc %s')
threshold_bmt = traits.Bool(
desc='Use the resulting image BiModalT as the threshold.',
argstr='-threshold_bmt')
threshold_blur = traits.Float(
desc='Blur FWHM for intensity edges then thresholding [default: 2].',
argstr='-threshold_blur %s')
class NormOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
output_threshold_mask = File(desc='threshold mask file')
class Norm(CommandLine):
"""Normalise a file between a max and minimum (possibly)
using two histogram pct's.
Examples
--------
>>> from nipype.interfaces.minc import Norm
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> n = Norm(input_file=minc2Dfile, output_file='/tmp/out.mnc') # Normalise the file.
>>> n.run() # doctest: +SKIP
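    A sketch using a histogram cutoff (the value is illustrative only):
    >>> n2 = Norm(input_file=minc2Dfile, output_file='/tmp/out2.mnc', cutoff=0.01)
    >>> n2.run() # doctest: +SKIP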
"""
input_spec = NormInputSpec
output_spec = NormOutputSpec
_cmd = 'mincnorm'
"""
| volcentre will centre a MINC image's sampling about a point (0,0,0 typically)
|
| NB: It will modify the file in-place unless an outfile is given
|
| Problems or comments should be sent to: a.janke@gmail.com
Summary of options:
-version print version and exit
-verbose be verbose
-noverbose opposite of -verbose [default]
-clobber clobber existing check files
-noclobber opposite of -clobber [default]
-fake do a dry run, (echo cmds only)
-nofake opposite of -fake [default]
-com Use the CoM of the volume for the new centre (via mincstats)
-nocom opposite of -com [default]
-centre <float> <float> <float>
Centre to use (x,y,z) [default: 0 0 0]
-zero_dircos Set the direction cosines to identity [default]
   -nozero_dircos opposite of -zero_dircos
Usage: volcentre [options] <infile.mnc> [<outfile.mnc>]
volcentre -help to list options
"""
class VolcentreInputSpec(CommandLineInputSpec):
"""
Not implemented:
-fake do a dry run, (echo cmds only)
-nofake opposite of -fake [default]
"""
input_file = File(
desc='input file to centre',
exists=True,
mandatory=True,
argstr='%s',
position=-2,)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s_volcentre.mnc')
verbose = traits.Bool(
desc='Print out log messages. Default: False.',
argstr='-verbose')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
com = traits.Bool(
desc='Use the CoM of the volume for the new centre (via mincstats). Default: False',
argstr='-com')
centre = traits.Tuple(
traits.Float, traits.Float, traits.Float,
argstr='-centre %s %s %s',
desc='Centre to use (x,y,z) [default: 0 0 0].',)
zero_dircos = traits.Bool(
desc='Set the direction cosines to identity [default].',
argstr='-zero_dircos')
class VolcentreOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
class Volcentre(CommandLine):
"""Centre a MINC image's sampling about a point, typically (0,0,0).
    Examples
--------
>>> from nipype.interfaces.minc import Volcentre
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> vc = Volcentre(input_file=minc2Dfile)
>>> vc.run() # doctest: +SKIP
"""
input_spec = VolcentreInputSpec
output_spec = VolcentreOutputSpec
_cmd = 'volcentre'
class VolpadInputSpec(CommandLineInputSpec):
"""
Not implemented:
-fake do a dry run, (echo cmds only)
-nofake opposite of -fake [default]
| volpad pads a MINC volume
|
| Problems or comments should be sent to: a.janke@gmail.com
Summary of options:
-- General Options -------------------------------------------------------------
-verbose be verbose
-noverbose opposite of -verbose [default]
-clobber clobber existing files
-noclobber opposite of -clobber [default]
-fake do a dry run, (echo cmds only)
-nofake opposite of -fake [default]
"""
input_file = File(
        desc='input file to pad',
exists=True,
mandatory=True,
argstr='%s',
position=-2,)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s_volpad.mnc')
verbose = traits.Bool(
desc='Print out log messages. Default: False.',
argstr='-verbose')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
auto = traits.Bool(
desc='Automatically determine padding distances (uses -distance as max). Default: False.',
argstr='-auto')
auto_freq = traits.Float(
desc='Frequency of voxels over bimodalt threshold to stop at [default: 500].',
argstr='-auto_freq %s')
distance = traits.Int(
desc='Padding distance (in voxels) [default: 4].',
argstr='-distance %s')
smooth = traits.Bool(
desc='Smooth (blur) edges before padding. Default: False.',
argstr='-smooth')
smooth_distance = traits.Int(
desc='Smoothing distance (in voxels) [default: 4].',
argstr='-smooth_distance %s')
class VolpadOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
class Volpad(CommandLine):
"""Centre a MINC image's sampling about a point, typically (0,0,0).
Examples
--------
>>> from nipype.interfaces.minc import Volpad
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> vp = Volpad(input_file=minc2Dfile, smooth=True, smooth_distance=4)
>>> vp.run() # doctest: +SKIP
"""
input_spec = VolpadInputSpec
output_spec = VolpadOutputSpec
_cmd = 'volpad'
class VolisoInputSpec(CommandLineInputSpec):
input_file = File(
desc='input file to convert to isotropic sampling',
exists=True,
mandatory=True,
argstr='%s',
position=-2,)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s_voliso.mnc')
verbose = traits.Bool(
desc='Print out log messages. Default: False.',
argstr='--verbose')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='--clobber',
usedefault=True,
default_value=True)
maxstep = traits.Float(
desc='The target maximum step desired in the output volume.',
argstr='--maxstep %s')
minstep = traits.Float(
desc='The target minimum step desired in the output volume.',
argstr='--minstep %s')
avgstep = traits.Bool(
desc='Calculate the maximum step from the average steps of the input volume.',
argstr='--avgstep')
class VolisoOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
class Voliso(CommandLine):
"""Changes the steps and starts in order that the output volume
has isotropic sampling.
Examples
--------
>>> from nipype.interfaces.minc import Voliso
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> viso = Voliso(input_file=minc2Dfile, minstep=0.1, avgstep=True)
>>> viso.run() # doctest: +SKIP
"""
input_spec = VolisoInputSpec
output_spec = VolisoOutputSpec
_cmd = 'voliso'
class GennlxfmInputSpec(CommandLineInputSpec):
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['like'],
hash_files=False,
name_template='%s_gennlxfm.xfm')
verbose = traits.Bool(
desc='Print out log messages. Default: False.',
argstr='-verbose')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
ident = traits.Bool(
desc='Generate an identity xfm. Default: False.',
argstr='-ident')
step = traits.Int(
desc='Output ident xfm step [default: 1].',
argstr='-step %s')
like = File(desc='Generate a nlxfm like this file.',
exists=True,
argstr='-like %s',)
class GennlxfmOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
output_grid = File(desc='output grid', exists=True)
class Gennlxfm(CommandLine):
"""Generate nonlinear xfms. Currently only identity xfms
are supported!
This tool is part of minc-widgets:
https://github.com/BIC-MNI/minc-widgets/blob/master/gennlxfm/gennlxfm
Examples
--------
>>> from nipype.interfaces.minc import Gennlxfm
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> gennlxfm = Gennlxfm(step=1, like=minc2Dfile)
>>> gennlxfm.run() # doctest: +SKIP
"""
input_spec = GennlxfmInputSpec
output_spec = GennlxfmOutputSpec
_cmd = 'gennlxfm'
def _list_outputs(self):
outputs = super(Gennlxfm, self)._list_outputs()
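        # A nonlinear xfm references an external displacement grid volume;
        # gennlxfm names it after the xfm file with a '_grid_0.mnc' suffix,
        # which is reconstructed here.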
outputs['output_grid'] = re.sub(
'.(nlxfm|xfm)$', '_grid_0.mnc', outputs['output_file'])
return outputs
class XfmConcatInputSpec(CommandLineInputSpec):
input_files = InputMultiPath(
traits.File(exists=True),
desc='input file(s)',
mandatory=True,
sep=' ',
argstr='%s',
position=-2)
# This is a dummy input.
input_grid_files = InputMultiPath(
traits.File,
desc='input grid file(s)',)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_files'],
hash_files=False,
name_template='%s_xfmconcat.xfm')
verbose = traits.Bool(
desc='Print out log messages. Default: False.',
argstr='-verbose')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
class XfmConcatOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
output_grids = OutputMultiPath(File(exists=True), desc='output grids')
class XfmConcat(CommandLine):
"""Concatenate transforms together. The output transformation
is equivalent to applying input1.xfm, then input2.xfm, ..., in
that order.
Examples
--------
>>> from nipype.interfaces.minc import XfmConcat
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> conc = XfmConcat(input_files=['input1.xfm', 'input1.xfm'])
>>> conc.run() # doctest: +SKIP
"""
input_spec = XfmConcatInputSpec
output_spec = XfmConcatOutputSpec
_cmd = 'xfmconcat'
    def _list_outputs(self):
        outputs = super(XfmConcat, self)._list_outputs()
        if os.path.exists(outputs['output_file']):
            # Nonlinear transforms reference external '_grid_*.mnc' volumes;
            # scan the xfm file to see whether any were produced.
            with open(outputs['output_file'], 'r') as f:
                contents = f.read()
            if 'grid' in contents:
                outputs['output_grids'] = glob.glob(
                    re.sub('.(nlxfm|xfm)$', '_grid_*.mnc',
                           outputs['output_file']))
        return outputs
class BestLinRegInputSpec(CommandLineInputSpec):
source = File(
desc='source Minc file',
exists=True,
mandatory=True,
argstr='%s',
position=-4,)
target = File(
desc='target Minc file',
exists=True,
mandatory=True,
argstr='%s',
position=-3,)
output_xfm = File(
desc='output xfm file',
genfile=True,
argstr='%s',
position=-2,
name_source=['source'],
hash_files=False,
name_template='%s_bestlinreg.xfm',
keep_extension=False)
output_mnc = File(
desc='output mnc file',
genfile=True,
argstr='%s',
position=-1,
name_source=['source'],
hash_files=False,
name_template='%s_bestlinreg.mnc',
keep_extension=False)
verbose = traits.Bool(
desc='Print out log messages. Default: False.',
argstr='-verbose')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
# FIXME Very bare implementation, none of these are done yet:
"""
-init_xfm initial transformation (default identity)
-source_mask source mask to use during fitting
-target_mask target mask to use during fitting
-lsq9 use 9-parameter transformation (default)
-lsq12 use 12-parameter transformation (default -lsq9)
-lsq6 use 6-parameter transformation
"""
class BestLinRegOutputSpec(TraitedSpec):
output_xfm = File(desc='output xfm file', exists=True)
output_mnc = File(desc='output mnc file', exists=True)
class BestLinReg(CommandLine):
"""Hierachial linear fitting between two files.
The bestlinreg script is part of the EZminc package:
https://github.com/BIC-MNI/EZminc/blob/master/scripts/bestlinreg.pl
Examples
--------
>>> from nipype.interfaces.minc import BestLinReg
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> input_file = nonempty_minc_data(0)
>>> target_file = nonempty_minc_data(1)
>>> linreg = BestLinReg(source=input_file, target=target_file)
>>> linreg.run() # doctest: +SKIP
"""
input_spec = BestLinRegInputSpec
output_spec = BestLinRegOutputSpec
_cmd = 'bestlinreg'
class NlpFitInputSpec(CommandLineInputSpec):
source = File(
desc='source Minc file',
exists=True,
mandatory=True,
argstr='%s',
position=-3,)
target = File(
desc='target Minc file',
exists=True,
mandatory=True,
argstr='%s',
position=-2,)
output_xfm = File(
desc='output xfm file',
genfile=True,
argstr='%s',
position=-1,)
# This is a dummy input.
input_grid_files = InputMultiPath(
traits.File,
desc='input grid file(s)',)
config_file = File(
desc='File containing the fitting configuration use.',
argstr='-config_file %s',
mandatory=True,
exists=True)
init_xfm = File(
desc='Initial transformation (default identity).',
argstr='-init_xfm %s',
mandatory=True,
exists=True)
source_mask = File(
desc='Source mask to use during fitting.',
argstr='-source_mask %s',
mandatory=True,
exists=True)
verbose = traits.Bool(
desc='Print out log messages. Default: False.',
argstr='-verbose')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
class NlpFitOutputSpec(TraitedSpec):
output_xfm = File(desc='output xfm file', exists=True)
output_grid = File(desc='output grid file', exists=True)
class NlpFit(CommandLine):
"""Hierarchial non-linear fitting with bluring.
This tool is part of the minc-widgets package:
https://github.com/BIC-MNI/minc-widgets/blob/master/nlpfit/nlpfit
Examples
--------
>>> from nipype.interfaces.minc import NlpFit
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data, nlp_config
>>> from nipype.testing import example_data
>>> source = nonempty_minc_data(0)
>>> target = nonempty_minc_data(1)
>>> source_mask = nonempty_minc_data(2)
>>> config = nlp_config
>>> initial = example_data('minc_initial.xfm')
>>> nlpfit = NlpFit(config_file=config, init_xfm=initial, source_mask=source_mask, source=source, target=target)
>>> nlpfit.run() # doctest: +SKIP
"""
input_spec = NlpFitInputSpec
output_spec = NlpFitOutputSpec
_cmd = 'nlpfit'
def _gen_filename(self, name):
if name == 'output_xfm':
output_xfm = self.inputs.output_xfm
if isdefined(output_xfm):
return os.path.abspath(output_xfm)
else:
return aggregate_filename(
[self.inputs.source, self.inputs.target], 'nlpfit_xfm_output') + '.xfm'
else:
            raise NotImplementedError
    def _list_outputs(self):
        outputs = self.output_spec().get()
        outputs['output_xfm'] = os.path.abspath(
            self._gen_filename('output_xfm'))
        assert os.path.exists(outputs['output_xfm'])
        with open(outputs['output_xfm'], 'r') as f:
            contents = f.read()
        if 'grid' in contents:
            outputs['output_grid'] = re.sub(
                '.(nlxfm|xfm)$', '_grid_0.mnc', outputs['output_xfm'])
        return outputs
class XfmAvgInputSpec(CommandLineInputSpec):
input_files = InputMultiPath(
traits.File(exists=True),
desc='input file(s)',
mandatory=True,
sep=' ',
argstr='%s',
position=-2)
# This is a dummy input.
input_grid_files = InputMultiPath(
traits.File,
desc='input grid file(s)',)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,)
verbose = traits.Bool(
desc='Print out log messages. Default: False.',
argstr='-verbose')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
# FIXME xor these:
avg_linear = traits.Bool(
desc='average the linear part [default].',
argstr='-avg_linear')
avg_nonlinear = traits.Bool(
desc='average the non-linear part [default].',
argstr='-avg_nonlinear')
ignore_linear = traits.Bool(
desc='opposite of -avg_linear.',
argstr='-ignore_linear')
ignore_nonlinear = traits.Bool(
desc='opposite of -avg_nonlinear.',
        argstr='-ignore_nonlinear')
class XfmAvgOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
output_grid = File(desc='output grid file', exists=True)
class XfmAvg(CommandLine):
"""Average a number of xfm transforms using matrix logs and exponents.
The program xfmavg calls Octave for numerical work.
This tool is part of the minc-widgets package:
https://github.com/BIC-MNI/minc-widgets/tree/master/xfmavg
Examples
--------
>>> from nipype.interfaces.minc import XfmAvg
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data, nlp_config
>>> from nipype.testing import example_data
>>> xfm1 = example_data('minc_initial.xfm')
>>> xfm2 = example_data('minc_initial.xfm') # cheating for doctest
>>> xfmavg = XfmAvg(input_files=[xfm1, xfm2])
>>> xfmavg.run() # doctest: +SKIP
"""
input_spec = XfmAvgInputSpec
output_spec = XfmAvgOutputSpec
_cmd = 'xfmavg'
def _gen_filename(self, name):
if name == 'output_file':
output_file = self.inputs.output_file
if isdefined(output_file):
return os.path.abspath(output_file)
else:
return aggregate_filename(
self.inputs.input_files, 'xfmavg_output') + '.xfm'
else:
            raise NotImplementedError
def _gen_outfilename(self):
return self._gen_filename('output_file')
    def _list_outputs(self):
        outputs = self.output_spec().get()
        outputs['output_file'] = os.path.abspath(self._gen_outfilename())
        assert os.path.exists(outputs['output_file'])
        with open(outputs['output_file'], 'r') as f:
            contents = f.read()
        if 'grid' in contents:
            outputs['output_grid'] = re.sub(
                '.(nlxfm|xfm)$', '_grid_0.mnc', outputs['output_file'])
        return outputs
class XfmInvertInputSpec(CommandLineInputSpec):
input_file = traits.File(
desc='input file',
exists=True,
mandatory=True,
argstr='%s',
position=-2)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,)
verbose = traits.Bool(
desc='Print out log messages. Default: False.',
argstr='-verbose')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
class XfmInvertOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
output_grid = File(desc='output grid file', exists=True)
class XfmInvert(CommandLine):
"""Invert an xfm transform file.
Examples
--------
    >>> from nipype.interfaces.minc import XfmInvert
>>> from nipype.testing import example_data
>>> xfm = example_data('minc_initial.xfm')
>>> invert = XfmInvert(input_file=xfm)
>>> invert.run() # doctest: +SKIP
"""
input_spec = XfmInvertInputSpec
output_spec = XfmInvertOutputSpec
_cmd = 'xfminvert'
def _gen_filename(self, name):
if name == 'output_file':
output_file = self.inputs.output_file
if isdefined(output_file):
return os.path.abspath(output_file)
else:
return aggregate_filename(
[self.inputs.input_file], 'xfminvert_output') + '.xfm'
else:
            raise NotImplementedError
def _gen_outfilename(self):
return self._gen_filename('output_file')
    def _list_outputs(self):
        outputs = self.output_spec().get()
        outputs['output_file'] = os.path.abspath(self._gen_outfilename())
        assert os.path.exists(outputs['output_file'])
        with open(outputs['output_file'], 'r') as f:
            contents = f.read()
        if 'grid' in contents:
            outputs['output_grid'] = re.sub(
                '.(nlxfm|xfm)$', '_grid_0.mnc', outputs['output_file'])
        return outputs
class BigAverageInputSpec(CommandLineInputSpec):
input_files = InputMultiPath(
traits.File(exists=True),
desc='input file(s)',
mandatory=True,
sep=' ',
argstr='%s',
position=-2)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_files'],
hash_files=False,
name_template='%s_bigaverage.mnc')
verbose = traits.Bool(
desc='Print out log messages. Default: False.',
argstr='--verbose')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='--clobber',
usedefault=True,
default_value=True)
    # FIXME Rudimentary implementation, various parameters not implemented.
# TODO!
output_float = traits.Bool(
desc='Output files with float precision.',
argstr='--float')
robust = traits.Bool(
        desc=('Perform robust averaging, features that are outside 1 standard '
              'deviation from the mean are downweighted. Works well for noisy '
              'data with artifacts. See the --tmpdir option if you have a '
              'large number of input files.'),
argstr='-robust')
# Should Nipype deal with where the temp directory is?
tmpdir = Directory(desc='temporary files directory', argstr='-tmpdir %s')
sd_file = File(
desc='Place standard deviation image in specified file.',
argstr='--sdfile %s',
name_source=['input_files'],
hash_files=False,
name_template='%s_bigaverage_stdev.mnc')
class BigAverageOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
sd_file = File(desc='standard deviation image', exists=True)
class BigAverage(CommandLine):
"""Average 1000's of MINC files in linear time.
mincbigaverage is designed to discretise the problem of averaging either
a large number of input files or averaging a smaller number of large
files. (>1GB each). There is also some code included to perform "robust"
averaging in which only the most common features are kept via down-weighting
outliers beyond a standard deviation.
One advantage of mincbigaverage is that it avoids issues around the number
of possible open files in HDF/netCDF. In short if you have more than 100
files open at once while averaging things will slow down significantly.
mincbigaverage does this via a iterative approach to averaging files and
is a direct drop in replacement for mincaverage. That said not all the
arguments of mincaverage are supported in mincbigaverage but they should
be.
This tool is part of the minc-widgets package:
https://github.com/BIC-MNI/minc-widgets/blob/master/mincbigaverage/mincbigaverage
Examples
--------
>>> from nipype.interfaces.minc import BigAverage
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> files = [nonempty_minc_data(i) for i in range(3)]
>>> average = BigAverage(input_files=files, output_float=True, robust=True)
>>> average.run() # doctest: +SKIP
"""
input_spec = BigAverageInputSpec
output_spec = BigAverageOutputSpec
_cmd = 'mincbigaverage'
class ReshapeInputSpec(CommandLineInputSpec):
input_file = traits.File(
desc='input file',
exists=True,
mandatory=True,
argstr='%s',
position=-2)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s_reshape.mnc')
verbose = traits.Bool(
desc='Print out log messages. Default: False.',
argstr='-verbose')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
# FIXME MANY options not implemented!
write_short = traits.Bool(
desc='Convert to short integer data.',
argstr='-short')
class ReshapeOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
class Reshape(CommandLine):
"""Cut a hyperslab out of a minc file, with dimension reordering.
This is also useful for rewriting with a different format, for
example converting to short (see example below).
Examples
--------
>>> from nipype.interfaces.minc import Reshape
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> input_file = nonempty_minc_data(0)
>>> reshape_to_short = Reshape(input_file=input_file, write_short=True)
>>> reshape_to_short.run() # doctest: +SKIP
"""
input_spec = ReshapeInputSpec
output_spec = ReshapeOutputSpec
_cmd = 'mincreshape'
class VolSymmInputSpec(CommandLineInputSpec):
input_file = traits.File(
desc='input file',
exists=True,
mandatory=True,
argstr='%s',
position=-3)
trans_file = traits.File(
desc='output xfm trans file',
genfile=True,
argstr='%s',
position=-2,
name_source=['input_file'],
hash_files=False,
name_template='%s_vol_symm.xfm',
keep_extension=False)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s_vol_symm.mnc')
# This is a dummy input.
input_grid_files = InputMultiPath(
traits.File,
desc='input grid file(s)',)
verbose = traits.Bool(
desc='Print out log messages. Default: False.',
argstr='-verbose')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
# FIXME MANY options not implemented!
fit_linear = traits.Bool(desc='Fit using a linear xfm.', argstr='-linear')
fit_nonlinear = traits.Bool(
desc='Fit using a non-linear xfm.',
argstr='-nonlinear')
# FIXME This changes the input/output behaviour of trans_file! Split into
# two separate interfaces?
nofit = traits.Bool(
desc='Use the input transformation instead of generating one.',
argstr='-nofit')
config_file = File(
desc='File containing the fitting configuration (nlpfit -help for info).',
argstr='-config_file %s',
exists=True)
x = traits.Bool(desc='Flip volume in x-plane (default).', argstr='-x')
y = traits.Bool(desc='Flip volume in y-plane.', argstr='-y')
z = traits.Bool(desc='Flip volume in z-plane.', argstr='-z')
class VolSymmOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
trans_file = File(desc='xfm trans file', exists=True)
output_grid = File(desc='output grid file',
exists=True) # FIXME Is exists=True correct?
class VolSymm(CommandLine):
"""Make a volume symmetric about an axis either linearly
and/or nonlinearly. This is done by registering a volume
to a flipped image of itself.
This tool is part of the minc-widgets package:
https://github.com/BIC-MNI/minc-widgets/blob/master/volsymm/volsymm
Examples
--------
>>> from nipype.interfaces.minc import VolSymm
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> input_file = nonempty_minc_data(0)
>>> volsymm = VolSymm(input_file=input_file)
>>> volsymm.run() # doctest: +SKIP
"""
input_spec = VolSymmInputSpec
output_spec = VolSymmOutputSpec
_cmd = 'volsymm'
    def _list_outputs(self):
        outputs = super(VolSymm, self)._list_outputs()
        # Have to manually check for the grid files.
        if os.path.exists(outputs['trans_file']):
            with open(outputs['trans_file'], 'r') as f:
                contents = f.read()
            if 'grid' in contents:
                outputs['output_grid'] = re.sub(
                    '.(nlxfm|xfm)$', '_grid_0.mnc', outputs['trans_file'])
        return outputs
| sgiavasis/nipype | nipype/interfaces/minc/minc.py | Python | bsd-3-clause | 111261 | ["Gaussian", "NetCDF"] | 16868e0ccf54da9db6cf5ae60407b7d3006515cc526509a54a307aa878d6de0d |