| text (string) | repo_name (string) | path (string) | language (string) | license (string) | size (int32) | keyword (list) | text_hash (string) |
|---|---|---|---|---|---|---|---|
"""
This program splits a VCF file into smaller ones according to custom
parameters, compresses every sub-file with bgzip and builds the
corresponding tabix indexes.
Copyright (c) Shujia Huang
Date: 2015-08-11
"""
import sys
import os
import re
import optparse
import string
import time
from subprocess import Popen, PIPE
import pysam # I use the tabix module in pysam
def get_opt():
""" Loading parameter for scaling vcf. """
usage = '\nUsage: python %prog [options] -v <vcf_file>'
optp = optparse.OptionParser(usage=usage)
optp.add_option('-v', '--vcf', dest = 'vcffile', metavar = 'VCF',
help = 'Variants. VCF format.', default = '')
optp.add_option('-c', '--chr', dest='ref_chrom' , metavar='CHR',
help = 'Chromosome ID(s) of the reference to process [default: all]. '
'e.g. -c chr1 or -c chr1,chr2', default = '')
optp.add_option('-n', '--num', dest = 'number', metavar = 'INT',
help = 'The number of pieces to split the input VCF (-v) into.',
default = 10)
optp.add_option('-d', '--outdir', dest = 'outdir', metavar = 'DIR',
help = 'The subfile output directory.', default = '.')
optp.add_option('--rec', dest = 'recursive', metavar = 'Boolean',
help = 'Recursively split the input vcffile (-v) by '
'its chromosome IDs (-c) or not.', default = '')
# For Job running
optp.add_option('-p', '--prefix', dest = 'outprefix', metavar = 'OUT',
help = 'Out file prefix', default = 'test')
optp.add_option('--prog', dest = 'prog', metavar = 'STR',
help = 'The program for jobs', default = '')
optp.add_option('--cmp', dest = 'comparameter', metavar = 'STR',
help = 'Common parameter for --prog', default = '')
optp.add_option('--qsub', dest = 'qsub', metavar = 'QSUB',
help = 'qsub parameters for jobs', default = '')
opt, _ = optp.parse_args()
if not opt.vcffile: optp.error('Required [-v vcffile]\n')
print >> sys.stderr, 'Parameters: python', ' '.join(sys.argv)
opt.number = abs(int(opt.number))
opt.recursive = True if opt.recursive or opt.ref_chrom else False
if opt.prog:
opt.prog = os.path.abspath(opt.prog)
# Create the outdir if it does not exist
if not os.path.exists(opt.outdir):
os.makedirs(opt.outdir)
opt.outdir = os.path.abspath(opt.outdir)
return opt
def main(opt):
if opt.number > 0 or opt.recursive:
sub_vcf_files = splitVCF(opt.vcffile, opt.ref_chrom, opt.number,
opt.outdir + '/tmp_in_dir', opt.recursive)
else:
# Don't need to split the VCF file
sub_vcf_files = [opt.vcffile]
outinfo = createJobScript(opt.prog, opt.comparameter, sub_vcf_files,
opt.outdir, opt.outprefix, opt.recursive)
# Qsub the jobs
if opt.qsub:
qsubJobs(opt.qsub, [q[0] for q in outinfo])
print '#Shell_Script\tOutput_file\tOutput_log'
print '\n'.join(['\t'.join(s) for s in outinfo])
def qsubJobs(qsub_cmd, jobscripts):
"""
Submit the job scripts with qsub_cmd.
Args:
`qsub_cmd`: the qsub command; it depends on your computing cluster.
"""
import commands
for q in jobscripts:
sh_dir = os.path.dirname(os.path.abspath(q))
(err_stat, job_id) = commands.getstatusoutput('cd %s && %s %s' %
(sh_dir, qsub_cmd, q))
if not err_stat:
print >> sys.stderr, '[Good] Submitting job %s (%s) done' % (q, job_id)
else:
print >> sys.stderr, '[ERRR] Submitting job %s (%s) failed' % (q, job_id)
def createJobScript(program, com_parameters, input_files,
outdir, outprefix, recursive):
"""
Create one job shell script per input file and per chromosome.
Args:
`program`/`com_parameters`: program run with the 'genotype' sub-command and
its common parameters; `input_files`: tabix-indexed VCFs; `outdir`,
`outprefix`: output location and file prefix; `recursive`: group outputs per chromosome.
"""
tmp_out_dir = outdir + '/tmp_out_dir'
shell_dir = outdir + '/shell'
for d in (tmp_out_dir, shell_dir):
if not os.path.exists(d):
os.makedirs(d)
outinfo = []
for i, file in enumerate(input_files):
fh = pysam.TabixFile(file)
for chr in fh.contigs:
# For output files
sub_o_dir = tmp_out_dir + '/' + chr if recursive else tmp_out_dir
if not os.path.exists(sub_o_dir):
os.makedirs(sub_o_dir)
outpfx = sub_o_dir + '/' + outprefix
# For shell
sub_s_dir = shell_dir + '/' + chr if recursive else shell_dir
if not os.path.exists(sub_s_dir):
os.makedirs(sub_s_dir)
shell_pfx = sub_s_dir + '/' + outprefix
sub_out_file = '.'.join([outpfx, str(i + 1), chr, 'vcf'])
sub_out_log = '.'.join([outpfx, str(i + 1), chr, 'log'])
sub_sh_file = '.'.join([shell_pfx, str(i + 1), chr, 'sh'])
outinfo.append([sub_sh_file, sub_out_file, sub_out_log])
sh = open(sub_sh_file, 'w')
sh.write('time python %s genotype %s -c %s -v %s > %s 2> %s\n' %
(program, com_parameters, chr, file, sub_out_file,
sub_out_log))
sh.close()
fh.close()
return outinfo
def splitVCF(vcffile, ref_chrom, split_num, sub_outdir, is_rec_split = True):
"""
Split the input vcffile into pieces.
"""
if not os.path.exists(sub_outdir):
os.makedirs(sub_outdir)
print >> sys.stderr, '[INFO] ** Counting VCF lines. **'
vcf_line_count, chrom_ids = _get_vcf_line_count(vcffile, ref_chrom)
print >> sys.stderr, '[INFO] ** Splitting vcf file. **'
vcf_reader = pysam.TabixFile(vcffile)
vcf_header = '\n'.join([h for h in vcf_reader.header])
# get vcffile's file name by os.path.split
_, fname = os.path.split(vcffile)
sub_vcf_files = []
if is_rec_split:
"""
Split the whole vcf file by different chrom in `chrom_ids`.
"""
for chrom in chrom_ids:
sub_chr_dir = sub_outdir + '/' + chrom
if not os.path.exists(sub_chr_dir):
os.makedirs(sub_chr_dir)
tot_num, lniof = _set_step_num(vcf_line_count[chrom], split_num)
outprefix = sub_chr_dir + '/tmp.in.' + fname + '.' + chrom
print >> sys.stderr, ('[INFO] ** Splitting VCF file of %s '
'into %d. **' % (chrom, tot_num))
for f in outputSubVCF(vcf_reader.fetch(chrom),
vcf_header,
lniof,
tot_num,
outprefix):
sub_vcf_files.append(f)
else:
outprefix = sub_outdir + '/tmp.in.' + fname
tot_num, lniof = _set_step_num(vcf_line_count['all'], split_num)
sub_vcf_files = outputSubVCF(vcf_reader, vcf_header, lniof,
tot_num, outprefix)
vcf_reader.close()
# compress all the sub file for tabix
for i, f in enumerate(sub_vcf_files):
# Build the tabix index. pysam.tabix_index() automatically compresses the
# file with bgzip if its name does not end with '.gz'; the original file is
# removed and only the compressed file is kept.
f = pysam.tabix_index(sub_vcf_files[i], force = True, preset = 'vcf')
sub_vcf_files[i] = f # The compressed file after tabix
print >> sys.stderr, '[INFO] ** All split files done. **'
return sub_vcf_files
def outputSubVCF(vcf_reader, vcf_header, line_num_in_one_file, t_f_n,
outfile_prefix):
"""
Split the VCF records from `vcf_reader` into several sub-files (bgzip
compression and tabix indexing are done afterwards by the caller).
Return the paths of all sub-files as a list.
Args:
`vcf_reader`: VCF reader handle.
`t_f_n`: Total file numbers. ('split_num' same as opt.number)
"""
line_num = 0
sfn = 0
sub_files = []
vcf_writer = None
for r in vcf_reader:
if line_num % 100000 == 0:
print >> sys.stderr, ('[INFO] >> outputting %d lines in '
'sub file. %s' %
(line_num + 1, time.asctime()))
if line_num % line_num_in_one_file == 0:
if vcf_writer:
vcf_writer.close()
sfn += 1
sub_file = outfile_prefix + '.' + str(sfn) + '_' + str(t_f_n) + '.vcf'
vcf_writer = open(sub_file, 'w')
print >> vcf_writer, vcf_header # Output VCF header
sub_files.append(sub_file)
# Output the VCF record
print >> vcf_writer, r
line_num += 1
if vcf_writer:
vcf_writer.close()
print >> sys.stderr, ('[INFO] >> Output %d lines in total to '
'sub files. %s' % (line_num, time.asctime()))
return sub_files
def _get_vcf_line_count(vcffile, chrom_id):
"""
Count the data lines of this VCF file per chromosome and record them in a dict.
"""
f = pysam.TabixFile(vcffile)
chrom_id_set = set(f.contigs) # Initial
f.close()
if chrom_id:
chrom_id_set = set(chrom_id.split(','))
# Initial the dict
line_count = {k: 0 for k in chrom_id_set}
line_count['all'] = 0
I = os.popen('gzip -dc %s' % vcffile) if vcffile[-3:] == '.gz' else open(vcffile)
while 1:
lines = I.readlines(100000)
if not lines: break
for line in lines:
if re.search(r'^#', line): continue
line_count['all'] += 1
if line_count['all'] % 100000 == 0:
print >> sys.stderr, ('[INFO] >> Counting %d lines. << %s' %
(line_count['all'], time.asctime()))
col = line.strip('\n').split()
if col[0] not in chrom_id_set: continue
line_count[col[0]] += 1
I.close()
print >> sys.stderr, '[INFO] ** The VCF has %d data lines' % line_count['all']
# Reset chrom_id_set to guarantee it stays consistent with the VCF file
chrom_id_set = set([k for k, v in line_count.items() if v > 0])
if 'all' in chrom_id_set:
chrom_id_set.remove('all')
return line_count, list(chrom_id_set)
def _set_step_num(line_count, sub_scale_num):
step = line_count / sub_scale_num
if step == 0:
print >> sys.stderr, ('[WARNING] The requested number of sub-files is bigger '
'than the number of lines in the VCF file. Resetting it to 1.')
step = line_count
sub_scale_num = 1
if step * sub_scale_num < line_count:
step += 1
return sub_scale_num, step
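# A small worked example of _set_step_num (illustrative numbers only):
#   _set_step_num(105, 10) -> (10, 11): 10 sub-files with at most 11 records each.
#   _set_step_num(3, 10)   -> (1, 3):   fewer records than requested pieces,
#                                       so everything goes into a single file.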
if __name__ == '__main__':
cmdopt = get_opt()
main(cmdopt)
print >> sys.stderr, '**>> For the flowers bloom in the desert <<**\n'
| ShujiaHuang/AsmVar2 | scripts/scale.py | Python | mit | 10,816 | ["pysam"] | d314cfdd948f09fa12afc4e746f785761a262be62d19e9869605fc40a7014fe3 |
# -*- coding: utf-8 -*-
"""
PyOpenWorm
==========
OpenWorm Unified Data Abstract Layer.
Classes
-------
.. autoclass:: Network
:members:
.. autoclass:: Neuron
:members:
.. autoclass:: Worm
:members:
.. autoclass:: Muscle
:members:
"""
__version__ = '0.0.1'
__author__ = 'Stephen Larson'
from .configure import Configure,Configureable,ConfigValue,BadConf
from .data import Data,DataUser,propertyTypes
from .dataObject import DataObject
from .cell import Cell
from .network import Network
from .neuron import Neuron
from .worm import Worm
from .evidence import Evidence,EvidenceError
from .muscle import Muscle
from .quantity import Quantity
from .my_neuroml import NeuroML
from .relationship import Relationship
from .connection import Connection
| mwatts15/PyOpenWorm | PyOpenWorm/__init__.py | Python | mit | 764 | ["NEURON"] | 21e723364a1cfd4eed6f5328d791a4a8ec600d45fd8576b2ba35d416aef072ce |
mnemonics = {
"NOP": b'\x00\x00',
"STA": b'\x10\x00',
"LDA": b'\x20\x00',
"ADD": b'\x30\x00',
"OR": b'\x40\x00',
"AND": b'\x50\x00',
"NOT": b'\x60\x00',
"JMP": b'\x80\x00',
"JN": b'\x90\x00',
"JZ": b'\xA0\x00',
"HLT": b'\xF0\x00'
}
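# Each mnemonic maps to a two-byte opcode word: the high nibble of the first
# byte selects the instruction and the remaining bits are zero padding. (This
# is assumed to follow the didactic Neander machine's encoding; the file
# itself carries no written spec.)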
import collections
import re
import types
class Lookahead:
def __init__(self, iter):
self.iter = iter
self.buffer = []
def __iter__(self):
return self
def next(self):
if self.buffer:
return self.buffer.pop(0)
else:
return next(self.iter)
def lookahead(self, n = 1):
"""Return an item n entries ahead in the iteration."""
while n > len(self.buffer):
try:
self.buffer.append(next(self.iter))
except StopIteration:
return None
return self.buffer[n-1]
Token = collections.namedtuple('Token', ['typ', 'value', 'line', 'column'])
def tokenize(code):
token_spec = [
('ID', r'[a-zA-Z_][a-zA-Z_0-9]*'),
('NUMBER', r'((0x[0-9A-F]+)|([0-9]+))'),
('MODIFIER', r'((\.)|(:)|(@))'),
('NEWLINE', r'(#.*?)?\n'),
('SKIP', r'([ \t]+)'),
('MISMATCH',r'.')
]
tok_regex = '|'.join('(?P<%s>%s)' % pair for pair in token_spec)
line_num = 1
line_start = 0
for mo in re.finditer(tok_regex, code):
kind = mo.lastgroup
value = mo.group(kind)
if kind == 'NEWLINE':
line_start = mo.end()
line_num += 1
elif kind == 'SKIP':
pass
elif kind == 'MISMATCH':
raise RuntimeError('{} unexpected on line {}'.format(value, line_num))
else:
if kind == 'ID' and value in mnemonics:
kind = 'MNEMONIC'
column = mo.start() - line_start
yield Token(kind, value, line_num, column)
Node = collections.namedtuple('Node', ['typ', 'nodes'])
class ParseError(Exception):
def __init__(self, tk, expect):
self.tk = tk
self.expect = expect
msg = "Unexpected {} '{}'. Expected {}. At line {} end column {}".format(tk.typ, tk.value, expect, tk.line, tk.column)
super(ParseError, self).__init__(msg)
class Parser(object):
def __init__(self, tokens):
self.tokens = Lookahead(tokens);
def parse(self):
return self.parse_program()
def parse_program(self):
tks = self.tokens
intructions = []
while tks.lookahead():
tk = tks.lookahead()
if tk.typ == 'ID':
intructions.append(self.parse_assignment())
elif tk.typ == 'MODIFIER' and tk.value == ':':
intructions.append(self.parse_label())
elif tk.typ == 'MODIFIER' and tk.value =='@':
intructions.append(self.parse_declare())
elif tk.typ == 'MNEMONIC':
intructions.append(self.parse_mnemonic())
else:
raise ParseError(tk, "ID, ':', '@' or MNEMONIC")
return Node('PROGRAM', intructions)
def parse_assignment(self):
var = self.tokens.next()
value = self.tokens.next()
if value.typ not in ["NUMBER", "ID"]:
raise ParseError(value, "NUMBER or ID")
return Node('ASSIGMENT', [var, value])
def parse_mnemonic(self):
mne = self.tokens.next()
if mne.typ != "MNEMONIC":
raise ParseError(mne, "MNEMONIC")
if mne.value in ['NOP', 'NOT', 'HLT']:
return Node('INSTRUCTION', [mne])
else:
addr = self.parse_address()
return Node('INSTRUCTION', [mne, addr])
def parse_label(self):
tk = self.tokens.next()
if tk.typ == 'MODIFIER' and tk.value == ':':
ident = self.tokens.next()
if ident.typ == 'ID':
return Node('LABEL', [ident])
else:
raise ParseError(tk, "'ID'")
else:
raise ParseError(tk, "':'")
def parse_declare(self):
tk = self.tokens.next()
if tk.typ == 'MODIFIER' and tk.value == '@':
var = self.tokens.next()
if var.typ == 'ID':
value = self.tokens.next()
if value.typ == "NUMBER":
return Node('DECLARE', [var, value])
elif value.typ == "ID":
return Node('DECLARE', [var, value])
else:
raise ParseError(value, "NUMBER or ID")
else:
raise ParseError(tk, "'ID'")
else:
raise ParseError(tk, "'@'")
def parse_address(self):
tk = self.tokens.next()
if tk.typ == 'ID':
return Node('ADDRESS', [tk])
elif tk.typ == 'MODIFIER' and tk.value == '.':
value = self.tokens.next()
if value.typ == 'NUMBER':
return Node('ADDRESS', [Node('VALUE', [value])])
else:
raise ParseError(value, "NUMBER")
elif tk.typ == 'MODIFIER' and tk.value == '@':
ident = self.tokens.next()
if ident.typ == 'ID':
return Node('ADDRESS', [Node('USAGE', [ident])])
else:
raise ParseError(ident, "ID")
else:
raise ParseError(tk, "ID or '.'")
class NodeVisitor:
stack = []
def genvisit(self, node):
result = self.rule(node)
if isinstance(result, types.GeneratorType):
result = yield from result
return result
def rule(self, node):
name = "visit_"+node.typ
return getattr(self, name)(node)
def visit(self, node):
stack = [self.genvisit(node)]
result = None
while stack:
try:
node = stack[-1].send(result)
stack.append(self.genvisit(node))
result = None
except StopIteration as e:
stack.pop()
result = e.value
return result
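# Note: visit() acts as a trampoline. genvisit() wraps each rule in a
# generator; when a visitor method yields a child node, the child's generator
# is pushed on an explicit stack and its result is sent back in, so deeply
# nested ASTs do not hit Python's recursion limit.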
class Prettifier(NodeVisitor):
def indent(self, text, amount, ch=' '):
padding = amount * ch
return padding + ('\n'+padding).join(text.split('\n'))
def rule(self, node):
return self.pretty(node)
def pretty(self, node):
if isinstance(node, Token):
return str(node)
result = node.typ+"\n"
for stm in node.nodes:
stm = yield stm
result += self.indent(stm, 3)+"\n"
return result
class Compiler(NodeVisitor):
def __init__(self, code_offset=0, data_offset=128):
self.code_offset = code_offset
self.data_offset = data_offset
self.lookup = {}
self.code_line = code_offset
self.data_line = data_offset
self.lookup_default = {}
def visit_PROGRAM(self, node):
magic_number = b'\x03NDR'
instructions = []
data = []
for stm in node.nodes:
ins, dat = yield stm
instructions+=ins
data+=dat
instructions = map(
lambda ins: ins(self.lookup) if callable(ins) else ins,
instructions)
print("VARIABLE VALUES\t")
for k, l in self.lookup.items():
print("\t{}\t\t{}".format(k, l(self.lookup)[0] if isinstance(l, collections.Callable) else l[0]))
print("DEFAULT VALUES\t")
for k, l in self.lookup_default.items():
print("\t{}\t\t{}".format(k, l(self.lookup)[0] if isinstance(l, collections.Callable) else l[0]))
instr = bytes(self.code_offset*2)+b''.join(instructions)
data = bytes(self.data_offset*2-len(instr))+b''.join(data)
return magic_number+instr+data
def visit_ASSIGMENT(self, node):
ident = node.nodes[0].value
value = node.nodes[1]
if value.typ == 'NUMBER':
self.lookup[ident] = bytes([self.data_line, 0])
self.data_line+=1
return [], [bytes([int(value.value, base=0), 0])]
elif value.typ == 'ID':
self.lookup[ident] = lambda lt: lt[value.value]
return [], []
def visit_INSTRUCTION(self, node):
mne = node.nodes[0].value
self.code_line+=1
if len(node.nodes)>1:
c, d = yield node.nodes[1]
self.code_line+=1
return [mnemonics[mne]]+c, d
return [mnemonics[mne]], []
def visit_LABEL(self, node):
ident = node.nodes[0].value
self.lookup[ident] = bytes([self.code_line, 0])
return [], []
def visit_DECLARE(self, node):
ident, value = node.nodes
if value.typ == 'NUMBER':
self.lookup_default[ident.value] = bytes([int(value.value, base=0), 0])
elif value.typ == 'ID':
self.lookup_default[ident.value] = lambda lt: lt[value.value]
return [], []
def visit_ADDRESS(self, node):
addr = node.nodes[0]
if addr.typ == 'ID':
return [lambda lt: lt[addr.value]], []
else:
return (yield addr)
def visit_VALUE(self, node):
val = node.nodes[0].value
line = self.data_line
self.data_line+=1
return [bytes([line, 0])], [bytes([int(val, base=0), 0])]
def visit_USAGE(self, node):
ident = node.nodes[0].value
val = self.lookup_default[ident]
self.lookup[ident] = bytes([self.code_line, 0])
return [val], []
if __name__ == "__main__":
import argparse, sys, codecs
p = argparse.ArgumentParser()
p.add_argument('input_file', type=argparse.FileType('r'))
p.add_argument('output_file', nargs='?', type=argparse.FileType('wb'), default=sys.stdout)
p.add_argument('--code_offset', type=int, default=0)
p.add_argument('--data_offset', type=int, default=128)
p.add_argument('--hex', action='store_true')
p.add_argument('--debug', action='store_true')
args = p.parse_args()
if args.output_file == sys.stdout:
args.hex = True
with args.input_file as input_file:
code = input_file.read()
tks = tokenize(code)
ast = Parser(tks).parse()
if args.debug:
print(Prettifier().visit(ast))
with args.output_file as output_file:
output = Compiler(args.code_offset, args.data_offset).visit(ast)
if args.hex:
output = '\n\n'+codecs.encode(output, 'hex').decode('ascii')+'\n\n'
output_file.write(output)
| dsvictor94/neander-compiler | compiler.py | Python | mit | 10,505 | ["VisIt"] | b6d4d73f6d3a6c5ddcb576465bec57cd50ad805d44070e1e14f8974efa5a12ef |
"""
Window functions (:mod:`scipy.signal.windows`)
==============================================
The suite of window functions for filtering and spectral estimation.
.. currentmodule:: scipy.signal.windows
.. autosummary::
:toctree: generated/
get_window -- Return a window of a given length and type.
barthann -- Bartlett-Hann window
bartlett -- Bartlett window
blackman -- Blackman window
blackmanharris -- Minimum 4-term Blackman-Harris window
bohman -- Bohman window
boxcar -- Boxcar window
chebwin -- Dolph-Chebyshev window
cosine -- Cosine window
dpss -- Discrete prolate spheroidal sequences
exponential -- Exponential window
flattop -- Flat top window
gaussian -- Gaussian window
general_cosine -- Generalized Cosine window
general_gaussian -- Generalized Gaussian window
general_hamming -- Generalized Hamming window
hamming -- Hamming window
hann -- Hann window
hanning -- Hann window
kaiser -- Kaiser window
nuttall -- Nuttall's minimum 4-term Blackman-Harris window
parzen -- Parzen window
taylor -- Taylor window
triang -- Triangular window
tukey -- Tukey window
"""
from ._windows import *
__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann',
'hamming', 'kaiser', 'gaussian', 'general_gaussian', 'general_cosine',
'general_hamming', 'chebwin', 'cosine', 'hann',
'exponential', 'tukey', 'taylor', 'get_window', 'dpss']
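# A minimal usage sketch (not part of this module's public code):
#   from scipy.signal.windows import get_window
#   w = get_window('hann', 64)             # 64-sample Hann window
#   w_k = get_window(('kaiser', 8.6), 64)  # parameterized windows take a tuple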
| grlee77/scipy | scipy/signal/windows/__init__.py | Python | bsd-3-clause | 1,771 | ["Gaussian"] | fa9c9b996a9507261a435a3b3d8694b8cd079ba655c9fb065c46c05e059cc2de |
from redux.ast import ASTNode
import logging
class Visitor(object):
"Implements the extrinsic Visitor pattern."
def __init__(self):
super(Visitor, self).__init__()
self.depth = 0
def log(self, fmt, *args, **kwargs):
logging.getLogger(type(self).__name__).debug("%s%d: " + fmt, " " * self.depth, self.depth, *args, **kwargs)
def visit(self, node, *args, **kwargs):
"Starts visiting node."
visitor = self.generic_visit
for cls in node.__class__.__mro__:
meth_name = 'visit_' + cls.__name__
try:
visitor = getattr(self, meth_name)
break
except AttributeError:
pass
self.log("Visiting child: %r", node)
self.depth += 1
result = visitor(node, *args, **kwargs)
self.log("Leaving node: %r", node)
self.depth -= 1
return result
class ASTVisitor(Visitor):
def generic_visit(self, node):
"""Called if no explicit visitor function exists for a node."""
for name, value in node.fields():
if isinstance(value, list):
for item in value:
if isinstance(item, ASTNode):
self.visit(item)
elif isinstance(value, ASTNode):
self.visit(value)
def push_scope(self):
pass
def pop_scope(self):
pass
def visit_Block(self, block):
self.push_scope()
self.generic_visit(block)
self.pop_scope()
class ASTTransformer(ASTVisitor):
def generic_visit(self, node):
for name, old_value in node.fields():
old_value = getattr(node, name, None)
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, ASTNode):
value = self.visit(value)
if value is None:
continue
elif not isinstance(value, ASTNode):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, ASTNode):
new_node = self.visit(old_value)
if new_node is None:
delattr(node, name)
else:
setattr(node, name, new_node)
return node
def visit_Block(self, block):
super(ASTTransformer, self).visit_Block(block)
return block
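# A minimal sketch of a concrete visitor (the names below are hypothetical,
# for illustration only):
#   class NodeCounter(ASTVisitor):
#       def __init__(self):
#           super(NodeCounter, self).__init__()
#           self.count = 0
#       def generic_visit(self, node):
#           self.count += 1
#           super(NodeCounter, self).generic_visit(node)
#   counter = NodeCounter()
#   counter.visit(ast_root)   # ast_root: any ASTNode tree
#   print(counter.count)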
| Muon/redux | redux/visitor.py | Python | mit | 2,625 | ["VisIt"] | 2966b0045e52e30c77edef5c986d7cbb379b61f5f54b866466ede539787c4dac |
#!/usr/bin/env python3
##############################################################################
# Copyright (c) 2021: Leonardo Cardoso
# https://github.com/LeoFCardoso/pdf2pdfocr
##############################################################################
# OCR a PDF and add a text "layer" in the original file (a so called "pdf sandwich")
# Use only open source tools.
# Unless requested, does not re-encode the images inside an unprotected PDF file.
# Leonardo Cardoso - inspired in ocrmypdf (https://github.com/jbarlow83/OCRmyPDF)
# and this post: https://github.com/jbarlow83/OCRmyPDF/issues/8
###############################################################################
import argparse
import configparser
import datetime
import errno
import glob
import io
import itertools
import math
import multiprocessing
import os
import random
import re
import shlex
import shutil
import signal
import string
import subprocess
import sys
import tempfile
import time
from collections import namedtuple
from concurrent import futures
from distutils.version import LooseVersion
from pathlib import Path
from xml.etree import ElementTree
import PyPDF2
import psutil
from PIL import Image, ImageChops
from PyPDF2.generic import ByteStringObject
from PyPDF2.utils import PdfReadError
from bs4 import BeautifulSoup
from reportlab.lib.units import inch
from reportlab.pdfgen.canvas import Canvas
__author__ = 'Leonardo F. Cardoso'
VERSION = '1.9.1 marapurense '
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, flush=True, **kwargs)
def do_pdftoimage(param_path_pdftoppm, param_page_range, param_input_file, param_image_resolution, param_tmp_dir,
param_prefix, param_shell_mode):
"""
Will be called from multiprocessing, so no global variables are allowed.
Convert PDF to image file.
"""
command_line_list = [param_path_pdftoppm]
first_page = 0
last_page = 0
if param_page_range is not None:
first_page = param_page_range[0]
last_page = param_page_range[1]
command_line_list += ['-f', str(first_page), '-l', str(last_page)]
#
command_line_list += ['-r', str(param_image_resolution), '-jpeg', param_input_file, param_tmp_dir + param_prefix]
pimage = subprocess.Popen(command_line_list, stdout=subprocess.DEVNULL,
stderr=open(param_tmp_dir + "pdftoppm_err_{0}-{1}-{2}.log".format(param_prefix, first_page, last_page), "wb"),
shell=param_shell_mode)
pimage.wait()
return pimage.returncode
def do_autorotate_info(param_image_file, param_shell_mode, param_temp_dir, param_tess_lang, param_path_tesseract, param_tesseract_version):
"""
Will be called from multiprocessing, so no global variables are allowed.
Do autorotate of images based on tesseract (execution with 'psm 0') information.
"""
param_image_no_ext = os.path.splitext(os.path.basename(param_image_file))[0]
psm_parameter = "-psm" if (param_tesseract_version == 3) else "--psm"
tess_command_line = [param_path_tesseract, '-l', param_tess_lang, psm_parameter, '0', param_image_file,
param_temp_dir + param_image_no_ext]
ptess1 = subprocess.Popen(tess_command_line,
stdout=open(param_temp_dir + "autorot_tess_out_{0}.log".format(param_image_no_ext), "wb"),
stderr=open(param_temp_dir + "autorot_tess_err_{0}.log".format(param_image_no_ext), "wb"),
shell=param_shell_mode)
ptess1.wait()
def do_deskew(param_image_file, param_threshold, param_shell_mode, param_path_mogrify):
"""
Will be called from multiprocessing, so no global variables are allowed.
Do a deskew of image.
"""
pd = subprocess.Popen([param_path_mogrify, '-deskew', param_threshold, param_image_file], shell=param_shell_mode)
pd.wait()
return True
def do_ocr_tesseract(param_image_file, param_extra_ocr_flag, param_tess_lang, param_tess_psm, param_temp_dir, param_shell_mode, param_path_tesseract,
param_text_generation_strategy, param_delete_temps, param_tess_can_textonly_pdf):
"""
Will be called from multiprocessing, so no global variables are allowed.
Do OCR of image with tesseract
"""
param_image_no_ext = os.path.splitext(os.path.basename(param_image_file))[0]
tess_command_line = [param_path_tesseract]
if type(param_extra_ocr_flag) == str:
tess_command_line.extend(param_extra_ocr_flag.split(" "))
tess_command_line.extend(['-l', param_tess_lang])
if param_text_generation_strategy == "tesseract":
tess_command_line += ['-c', 'tessedit_create_pdf=1']
if param_tess_can_textonly_pdf:
tess_command_line += ['-c', 'textonly_pdf=1']
#
if param_text_generation_strategy == "native":
tess_command_line += ['-c', 'tessedit_create_hocr=1']
#
tess_command_line += [
'-c', 'tessedit_create_txt=1',
'-c', 'tessedit_pageseg_mode=' + param_tess_psm,
param_image_file, param_temp_dir + param_image_no_ext]
pocr = subprocess.Popen(tess_command_line,
stdout=subprocess.DEVNULL,
stderr=open(param_temp_dir + "tess_err_{0}.log".format(param_image_no_ext), "wb"),
shell=param_shell_mode)
pocr.wait()
if param_text_generation_strategy == "tesseract" and (not param_tess_can_textonly_pdf):
pdf_file = param_temp_dir + param_image_no_ext + ".pdf"
pdf_file_tmp = param_temp_dir + param_image_no_ext + ".tesspdf"
os.rename(pdf_file, pdf_file_tmp)
output_pdf = PyPDF2.PdfFileWriter()
desc_pdf_file_tmp = open(pdf_file_tmp, 'rb')
tess_pdf = PyPDF2.PdfFileReader(desc_pdf_file_tmp, strict=False)
for i in range(tess_pdf.getNumPages()):
imagepage = tess_pdf.getPage(i)
output_pdf.addPage(imagepage)
#
output_pdf.removeImages(ignoreByteStringObject=False)
out_page = output_pdf.getPage(0) # Tesseract PDF is always one page in this software
# Hack to obtain smaller file (delete the image reference)
out_page["/Resources"][PyPDF2.generic.createStringObject("/XObject")] = PyPDF2.generic.ArrayObject()
out_page.compressContentStreams()
with open(pdf_file, 'wb') as f:
output_pdf.write(f)
desc_pdf_file_tmp.close()
# Try to save some temp space, as tesseract generates a PDF with the same size as the image
if param_delete_temps:
os.remove(pdf_file_tmp)
#
if param_text_generation_strategy == "native":
hocr = HocrTransform(param_temp_dir + param_image_no_ext + ".hocr", 300)
hocr.to_pdf(param_temp_dir + param_image_no_ext + ".pdf", image_file_name=None, show_bounding_boxes=False,
invisible_text=True)
# Track progress in all situations
Path(param_temp_dir + param_image_no_ext + ".tmp").touch() # .tmp files are used to track overall progress
def do_ocr_cuneiform(param_image_file, param_extra_ocr_flag, param_cunei_lang, param_temp_dir, param_shell_mode, param_path_cunei):
"""
Will be called from multiprocessing, so no global variables are allowed.
Do OCR of image with cuneiform
"""
param_image_no_ext = os.path.splitext(os.path.basename(param_image_file))[0]
cunei_command_line = [param_path_cunei]
if type(param_extra_ocr_flag) == str:
cunei_command_line.extend(param_extra_ocr_flag.split(" "))
cunei_command_line.extend(['-l', param_cunei_lang.lower(), "-f", "hocr", "-o", param_temp_dir + param_image_no_ext + ".hocr", param_image_file])
#
pocr = subprocess.Popen(cunei_command_line,
stdout=open(param_temp_dir + "cuneif_out_{0}.log".format(param_image_no_ext), "wb"),
stderr=open(param_temp_dir + "cuneif_err_{0}.log".format(param_image_no_ext), "wb"),
shell=param_shell_mode)
pocr.wait()
# Sometimes cuneiform fails to OCR and the expected HOCR file is missing. Experiments show that retrying with English can work around this.
if not os.path.isfile(param_temp_dir + param_image_no_ext + ".hocr") and param_cunei_lang.lower() != "eng":
eprint("Warning: fail to OCR file '{0}'. Trying again with English language.".format(param_image_no_ext))
cunei_command_line = [param_path_cunei]
if type(param_extra_ocr_flag) == str:
cunei_command_line.extend(param_extra_ocr_flag.split(" "))
cunei_command_line.extend(['-l', "eng", "-f", "hocr", "-o", param_temp_dir + param_image_no_ext + ".hocr", param_image_file])
pocr = subprocess.Popen(cunei_command_line,
stdout=open(param_temp_dir + "cuneif_out_eng_{0}.log".format(param_image_no_ext), "wb"),
stderr=open(param_temp_dir + "cuneif_err_eng_{0}.log".format(param_image_no_ext), "wb"),
shell=param_shell_mode)
pocr.wait()
#
bs_parser = "lxml"
if os.path.isfile(param_temp_dir + param_image_no_ext + ".hocr"):
# Try to fix unclosed meta tags, as cuneiform HOCR may not be well formed
with open(param_temp_dir + param_image_no_ext + ".hocr", "r") as fpr:
corrected_hocr = str(BeautifulSoup(fpr, bs_parser))
else:
eprint("Warning: fail to OCR file '{0}'. Page will not contain text.".format(param_image_no_ext))
# TODO try to use the same size as original PDF page (bbox is hard coded by now to look like A4 page - portrait)
corrected_hocr = str(BeautifulSoup('<div class="ocr_page" id="page_1" title="image x; bbox 0 0 1700 2400">', bs_parser))
with open(param_temp_dir + param_image_no_ext + ".fixed.hocr", "w") as fpw:
fpw.write(corrected_hocr)
#
hocr = HocrTransform(param_temp_dir + param_image_no_ext + ".fixed.hocr", 300)
hocr.to_pdf(param_temp_dir + param_image_no_ext + ".pdf", image_file_name=None, show_bounding_boxes=False, invisible_text=True)
# Track progress
Path(param_temp_dir + param_image_no_ext + ".tmp").touch() # .tmp files are used to track overall progress
def do_rebuild(param_image_file, param_path_convert, param_convert_params, param_tmp_dir, param_shell_mode):
"""
Will be called from multiprocessing, so no global variables are allowed.
Create one PDF file from image file.
"""
param_image_no_ext = os.path.splitext(os.path.basename(param_image_file))[0]
# http://stackoverflow.com/questions/79968/split-a-string-by-spaces-preserving-quoted-substrings-in-python
convert_params_list = shlex.split(param_convert_params)
command_rebuild = [param_path_convert, param_image_file] + convert_params_list + [param_tmp_dir + "REBUILD_" + param_image_no_ext + ".pdf"]
prebuild = subprocess.Popen(
command_rebuild,
stdout=open(param_tmp_dir + "convert_log_{0}.log".format(param_image_no_ext), "wb"),
stderr=open(param_tmp_dir + "convert_err_{0}.log".format(param_image_no_ext), "wb"),
shell=param_shell_mode)
prebuild.wait()
def do_check_img_greyscale(param_image_file):
"""
Inspired by code provided by karl-k:
https://stackoverflow.com/questions/23660929/how-to-check-whether-a-jpeg-image-is-color-or-gray-scale-using-only-python-stdli
Check if image is monochrome (1 channel or 3 identical channels)
"""
im = Image.open(param_image_file).convert('RGB')
rgb = im.split()
if ImageChops.difference(rgb[0], rgb[1]).getextrema()[1] != 0:
return False
if ImageChops.difference(rgb[0], rgb[2]).getextrema()[1] != 0:
return False
#
return True
def percentual_float(x):
x = float(x)
if x <= 0.0 or x > 1.0:
raise argparse.ArgumentTypeError("%r not in range (0.0, 1.0]" % (x,))
return x
class HocrTransformError(Exception):
pass
class HocrTransform:
"""
A class for converting documents from the hOCR format.
For details of the hOCR format, see:
http://docs.google.com/View?docid=dfxcv4vc_67g844kf
Adapted from https://github.com/jbarlow83/OCRmyPDF/blob/master/ocrmypdf/hocrtransform.py
"""
def __init__(self, hocr_file_name, dpi):
self.rect = namedtuple('Rect', ['x1', 'y1', 'x2', 'y2'])
self.dpi = dpi
self.boxPattern = re.compile(r'bbox((\s+\d+){4})')
self.hocr = ElementTree.parse(hocr_file_name)
# if the hOCR file has a namespace, ElementTree requires its use to
# find elements
matches = re.match(r'({.*})html', self.hocr.getroot().tag)
self.xmlns = ''
if matches:
self.xmlns = matches.group(1)
# get dimension in pt (not pixel!!!!) of the OCRed image
self.width, self.height = None, None
for div in self.hocr.findall(
".//%sdiv[@class='ocr_page']" % self.xmlns):
coords = self.element_coordinates(div)
pt_coords = self.pt_from_pixel(coords)
self.width = pt_coords.x2 - pt_coords.x1
self.height = pt_coords.y2 - pt_coords.y1
# there shouldn't be more than one, and if there is, we don't want it
break
if self.width is None or self.height is None:
raise HocrTransformError("hocr file is missing page dimensions")
def __str__(self):
"""
Return the textual content of the HTML body
"""
if self.hocr is None:
return ''
body = self.hocr.find(".//%sbody" % self.xmlns)
if body:
return self._get_element_text(body)
else:
return ''
def _get_element_text(self, element):
"""
Return the textual content of the element and its children
"""
text = ''
if element.text is not None:
text += element.text
for child in element:
text += self._get_element_text(child)
if element.tail is not None:
text += element.tail
return text
def element_coordinates(self, element):
"""
Returns a tuple containing the coordinates of the bounding box around
an element
"""
out = (0, 0, 0, 0)
if 'title' in element.attrib:
matches = self.boxPattern.search(element.attrib['title'])
if matches:
coords = matches.group(1).split()
out = self.rect._make(int(coords[n]) for n in range(4))
return out
def pt_from_pixel(self, pxl):
"""
Returns the quantity in PDF units (pt) given quantity in pixels
"""
return self.rect._make(
(c / self.dpi * inch) for c in pxl)
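# e.g. with dpi=300, a 600 px span maps to 600 / 300 * 72 = 144 pt (2 inches),
# since reportlab's `inch` constant equals 72 points.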
def replace_unsupported_chars(self, s):
"""
Given an input string, returns the corresponding string that:
- is available in the helvetica facetype
- does not contain any ligature (to allow easy search in the PDF file)
"""
# The 'u' before the character to replace indicates that it is a
# unicode character
s = s.replace(u"\ufb02", "fl")  # U+FB02 LATIN SMALL LIGATURE FL
s = s.replace(u"\ufb01", "fi")  # U+FB01 LATIN SMALL LIGATURE FI
return s
def to_pdf(self, out_file_name, image_file_name=None, show_bounding_boxes=False, fontname="Helvetica",
invisible_text=True):
"""
Creates a PDF file with an image superimposed on top of the text.
Text is positioned according to the bounding box of the lines in
the hOCR file.
The image need not be identical to the image used to create the hOCR
file.
It can have a lower resolution, different color mode, etc.
"""
# create the PDF file
# page size in points (1/72 in.)
pdf = Canvas(
out_file_name, pagesize=(self.width, self.height), pageCompression=1)
# draw bounding box for each paragraph
# light blue for bounding box of paragraph
pdf.setStrokeColorRGB(0, 1, 1)
# light blue for bounding box of paragraph
pdf.setFillColorRGB(0, 1, 1)
pdf.setLineWidth(0) # no line for bounding box
for elem in self.hocr.findall(
".//%sp[@class='%s']" % (self.xmlns, "ocr_par")):
elemtxt = self._get_element_text(elem).rstrip()
if len(elemtxt) == 0:
continue
pxl_coords = self.element_coordinates(elem)
pt = self.pt_from_pixel(pxl_coords)
# draw the bbox border
if show_bounding_boxes:
pdf.rect(pt.x1, self.height - pt.y2, pt.x2 - pt.x1, pt.y2 - pt.y1, fill=1)
# check if element with class 'ocrx_word' are available
# otherwise use 'ocr_line' as fallback
elemclass = "ocr_line"
if self.hocr.find(".//%sspan[@class='ocrx_word']" % self.xmlns) is not None:
elemclass = "ocrx_word"
# iterate over all text elements
# red stroke for the bounding box of each word/line
pdf.setStrokeColorRGB(1, 0, 0)
pdf.setLineWidth(0.5) # bounding box line width
pdf.setDash(6, 3) # bounding box is dashed
pdf.setFillColorRGB(0, 0, 0) # text in black
for elem in self.hocr.findall(".//%sspan[@class='%s']" % (self.xmlns, elemclass)):
elemtxt = self._get_element_text(elem).rstrip()
elemtxt = self.replace_unsupported_chars(elemtxt)
if len(elemtxt) == 0:
continue
pxl_coords = self.element_coordinates(elem)
pt = self.pt_from_pixel(pxl_coords)
# draw the bbox border
if show_bounding_boxes:
pdf.rect(pt.x1, self.height - pt.y2, pt.x2 - pt.x1, pt.y2 - pt.y1, fill=0)
text = pdf.beginText()
fontsize = pt.y2 - pt.y1
text.setFont(fontname, fontsize)
if invisible_text:
text.setTextRenderMode(3) # Invisible (indicates OCR text)
# set cursor to bottom left corner of bbox (adjust for dpi)
text.setTextOrigin(pt.x1, self.height - pt.y2)
# scale the width of the text to fill the width of the bbox
text.setHorizScale(100 * (pt.x2 - pt.x1) / pdf.stringWidth(elemtxt, fontname, fontsize))
# write the text to the page
text.textLine(elemtxt)
pdf.drawText(text)
#
# put the image on the page, scaled to fill the page
if image_file_name is not None:
pdf.drawImage(image_file_name, 0, 0, width=self.width, height=self.height)
# finish up the page and save it
pdf.showPage()
pdf.save()
#
class Pdf2PdfOcr:
# External tool commands. If you can't edit your PATH, adjust here to match your system
cmd_cuneiform = "cuneiform"
path_cuneiform = ""
cmd_tesseract = "tesseract"
path_tesseract = ""
cmd_convert = "convert"
cmd_magick = "magick" # used on Windows with ImageMagick 7+ (to avoid conversion path problems)
path_convert = ""
cmd_mogrify = "mogrify"
path_mogrify = ""
cmd_file = "file"
path_file = ""
cmd_pdftoppm = "pdftoppm"
path_pdftoppm = ""
cmd_pdffonts = "pdffonts"
path_pdffonts = ""
cmd_ps2pdf = "ps2pdf"
path_ps2pdf = ""
cmd_pdf2ps = "pdf2ps"
path_pdf2ps = ""
cmd_qpdf = "qpdf"
path_qpdf = ""
tesseract_can_textonly_pdf = False
"""Since Tesseract 3.05.01, new use case of tesseract - https://github.com/tesseract-ocr/tesseract/issues/660"""
tesseract_version = 3
"""Tesseract version installed on system"""
extension_images = "jpg"
"""Temp images will use this extension. Using jpg to avoid big temp files in pdf with a lot of pages"""
output_file = ""
"""The PDF output file"""
output_file_text = ""
"""The TXT output file"""
path_this_python = sys.executable
"""Path for python in this system"""
shell_mode = (sys.platform == "win32")
"""How to run external process? In Windows use Shell=True
http://stackoverflow.com/questions/5658622/python-subprocess-popen-environment-path
"Also, on Windows with shell=False, it pays no attention to PATH at all,
and will only look in relative to the current working directory."
"""
def __init__(self, args):
super().__init__()
self.log_time_format = '%Y-%m-%d %H:%M:%S.%f'
#
# A random prefix to support multiple executions in parallel
self.prefix = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(5))
# The temp dir
self.tmp_dir = tempfile.gettempdir() + os.path.sep + "pdf2pdfocr_{0}".format(self.prefix) + os.path.sep
os.mkdir(self.tmp_dir)
#
self.verbose_mode = args.verbose_mode
self.check_external_tools()
# Handle arguments from command line
self.safe_mode = args.safe_mode
self.check_text_mode = args.check_text_mode
self.check_protection_mode = args.check_protection_mode
self.avoid_high_pages_mode = args.max_pages is not None
self.avoid_high_pages_pages = args.max_pages
self.avoid_small_file_mode = args.min_kbytes is not None
self.avoid_small_file_limit_kb = args.min_kbytes
self.force_rebuild_mode = args.force_rebuild_mode
self.user_convert_params = args.convert_params
if self.user_convert_params is None:
self.user_convert_params = "" # Default
self.deskew_threshold = args.deskew_percent
self.use_deskew_mode = args.deskew_percent is not None
self.use_autorotate = args.autorotate
self.parallel_threshold = args.parallel_percent
if self.parallel_threshold is None:
self.parallel_threshold = 1 # Default
self.create_text_mode = args.create_text_mode
self.force_out_file_mode = args.output_file is not None
if self.force_out_file_mode:
self.force_out_file = args.output_file
else:
self.force_out_file = ""
self.force_out_dir_mode = args.output_dir is not None
if self.force_out_dir_mode:
self.force_out_dir = args.output_dir
else:
self.force_out_dir = ""
if self.force_out_file != "" and self.force_out_dir != "":
eprint("It's not possible to force output name and dir at the same time. Please use '-o' OR '-O'")
sys.exit(1)
if self.force_out_dir_mode and (not os.path.isdir(self.force_out_dir)):
eprint("Invalid output directory: {0}".format(self.force_out_dir))
sys.exit(1)
self.tess_langs = args.tess_langs
if self.tess_langs is None:
self.tess_langs = "por+eng" # Default
self.tess_psm = args.tess_psm
if self.tess_psm is None:
self.tess_psm = "1" # Default
self.image_resolution = args.image_resolution
self.text_generation_strategy = args.text_generation_strategy
if self.text_generation_strategy not in ["tesseract", "native"]:
eprint("{0} is not a valid text generation strategy. Exiting.".format(self.text_generation_strategy))
sys.exit(1)
self.ocr_ignored = False
self.ocr_engine = args.ocr_engine
if self.ocr_engine not in ["tesseract", "cuneiform", "no_ocr"]:
eprint("{0} is not a valid ocr engine. Exiting.".format(self.ocr_engine))
sys.exit(1)
self.extra_ocr_flag = args.extra_ocr_flag
if self.extra_ocr_flag is not None:
self.extra_ocr_flag = str(self.extra_ocr_flag.strip())
self.delete_temps = not args.keep_temps
self.input_file = args.input_file
if not os.path.isfile(self.input_file):
eprint("{0} not found. Exiting.".format(self.input_file))
sys.exit(1)
self.input_file = os.path.abspath(self.input_file)
self.input_file_type = ""
#
self.input_file_has_text = False
self.input_file_is_encrypted = False
self.input_file_metadata = dict()
self.input_file_number_of_pages = None
#
self.debug("Temp dir is {0}".format(self.tmp_dir))
self.debug("Prefix is {0}".format(self.prefix))
# Where am I?
self.script_dir = os.path.dirname(os.path.abspath(__file__)) + os.path.sep
self.debug("Script dir is {0}".format(self.script_dir))
#
self.cpu_to_use = int(multiprocessing.cpu_count() * self.parallel_threshold)
if self.cpu_to_use == 0:
self.cpu_to_use = 1
self.debug("Parallel operations will use {0} CPUs".format(self.cpu_to_use))
#
self.main_pool = multiprocessing.Pool(self.cpu_to_use)
#
def check_external_tools(self):
"""Check if external tools are available, aborting or warning in case of any error."""
self.path_tesseract = shutil.which(self.cmd_tesseract)
if self.path_tesseract is None:
eprint("tesseract not found. Aborting...")
sys.exit(1)
#
self.tesseract_can_textonly_pdf = self.test_tesseract_textonly_pdf()
self.tesseract_version = self.get_tesseract_version()
#
self.path_cuneiform = shutil.which(self.cmd_cuneiform)
if self.path_cuneiform is None:
self.debug("cuneiform not available")
#
# Try to avoid errors on Windows with native OS "convert" command
# http://savage.net.au/ImageMagick/html/install-convert.html
# https://www.imagemagick.org/script/magick.php
self.path_convert = shutil.which(self.cmd_convert)
if not self.test_convert():
self.path_convert = shutil.which(self.cmd_magick)
if self.path_convert is None:
eprint("convert/magick from ImageMagick not found. Aborting...")
sys.exit(1)
#
self.path_mogrify = shutil.which(self.cmd_mogrify)
if self.path_mogrify is None:
eprint("mogrify from ImageMagick not found. Aborting...")
sys.exit(1)
#
self.path_file = shutil.which(self.cmd_file)
if self.path_file is None:
eprint("file not found. Aborting...")
sys.exit(1)
#
self.path_pdftoppm = shutil.which(self.cmd_pdftoppm)
if self.path_pdftoppm is None:
eprint("pdftoppm (poppler) not found. Aborting...")
sys.exit(1)
if self.get_pdftoppm_version() <= LooseVersion("0.70.0"):
self.log("External tool 'pdftoppm' is outdated. Please upgrade poppler")
#
self.path_pdffonts = shutil.which(self.cmd_pdffonts)
if self.path_pdffonts is None:
eprint("pdffonts (poppler) not found. Aborting...")
sys.exit(1)
#
self.path_ps2pdf = shutil.which(self.cmd_ps2pdf)
self.path_pdf2ps = shutil.which(self.cmd_pdf2ps)
if self.path_ps2pdf is None or self.path_pdf2ps is None:
eprint("ps2pdf or pdf2ps (ghostscript) not found. File repair will not work...")
#
self.path_qpdf = shutil.which(self.cmd_qpdf)
if self.path_qpdf is None:
self.log("External tool 'qpdf' not available. Merge can be slow")
else:
qpdf_version = self.get_qpdf_version()
minimum_version = "8.4.1"
if qpdf_version < LooseVersion(minimum_version):
self.log("External tool 'qpdf' is not on minimum version ({0}). Merge can be slow".format(minimum_version))
self.path_qpdf = None
#
def debug(self, param):
try:
if self.verbose_mode:
tstamp = datetime.datetime.now().strftime(self.log_time_format)
print("[{0}] [DEBUG] {1}".format(tstamp, param), flush=True)
except:
pass
def log(self, param):
try:
tstamp = datetime.datetime.now().strftime(self.log_time_format)
print("[{0}] [LOG] {1}".format(tstamp, param), flush=True)
except:
pass
def cleanup(self):
#
# Try to kill all child process still alive (in timeout situation)
process = psutil.Process(os.getpid())
for proc in process.children(recursive=True):
if "python" not in proc.name().lower(): # Python process are from multiprocessing and will be handled below
self.debug("Killing child process {0} with pid {1}".format(proc.name(), proc.pid))
try:
proc.kill()
except:
pass # By design
#
# Cleanup the pool
if self.main_pool:
self.main_pool.close()
self.main_pool.terminate()
self.main_pool.join()
self.main_pool = None # Signal for pool to stop waiting in while loops
#
# Cleanup temp files
if self.delete_temps:
shutil.rmtree(self.tmp_dir, ignore_errors=True)
else:
eprint("Temporary files kept in {0}".format(self.tmp_dir))
def ocr(self):
time_at_start = time.time()
self.log("Welcome to pdf2pdfocr version {0} - https://github.com/LeoFCardoso/pdf2pdfocr".format(VERSION))
self.check_avoid_file_by_size()
self.detect_file_type()
if self.input_file_type == "application/pdf":
self.validate_pdf_input_file()
self.debug("Conversion params: {0}".format(self.user_convert_params))
self.define_output_files()
self.initial_cleanup()
self.convert_input_to_images()
# TODO - create param to user pass input page range for OCR
image_file_list = sorted(glob.glob(self.tmp_dir + "{0}*.{1}".format(self.prefix, self.extension_images)))
if self.input_file_number_of_pages is None:
self.input_file_number_of_pages = len(image_file_list)
self.check_avoid_high_pages()
# TODO - create param to user pass image filters before OCR
self.autorotate_info(image_file_list)
self.deskew(image_file_list)
self.external_ocr(image_file_list)
if not self.ocr_ignored:
self.join_ocred_pdf()
self.create_text_output()
self.build_final_output()
self.autorotate_final_output()
#
# TODO - create directory watch mode (maybe using watchdog library)
# Like a daemon
#
# TODO - create option for PDF/A files
# gs -dPDFA=3 -dBATCH -dNOPAUSE -sProcessColorModel=DeviceCMYK -sDEVICE=pdfwrite
# -sPDFACompatibilityPolicy=2 -sOutputFile=output_filename.pdf ./Test.pdf
# As in
# http://git.ghostscript.com/?p=ghostpdl.git;a=blob_plain;f=doc/VectorDevices.htm;hb=HEAD#PDFA
#
# Edit producer and build final PDF
# Without edit producer is easy as "shutil.copyfile(tmp_dir + prefix + "-OUTPUT.pdf", output_file)"
self.edit_producer()
#
self.debug("Output file created")
#
# Adjust the new file timestamp
# TODO touch -r "$INPUT_FILE" "$OUTPUT_FILE"
#
self.cleanup()
time_elapsed = time.time() - time_at_start
#
paypal_donate_link = "https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=leonardo%2ef%2ecardoso%40gmail%2ecom&lc=US&item_name" \
"=pdf2pdfocr%20development¤cy_code=USD&bn=PP%2dDonationsBF%3abtn_donateCC_LG%2egif%3aNonHosted"
flattr_donate_link = "https://flattr.com/@pdf2pdfocr.devel"
tippin_donate_link = "https://tippin.me/@LeoFCardoso"
bitcoin_address = "173D1zQQyzvCCCek9b1SpDvh7JikBEdtRJ"
dogecoin_address = "D94hD2qPnkxmZk8qa1b6F1d7NfUrPkmcrG"
pix_key = "0726e8f2-7e59-488a-8abb-bda8f0d7d9ce"
success_message = """Success in {6:.3f} seconds!
This software is free, but if you like it, please donate to support new features.
---> Paypal
{0}
---> Flattr
{1}
---> Tippin.me
{2}
---> Bitcoin (BTC) address: {3}
---> Dogecoin (DOGE) address: {4}
---> PIX (Brazilian Instant Payments) key: {5}
---> Please contact for donations in other cryptocurrencies - https://github.com/LeoFCardoso/pdf2pdfocr""".format(
paypal_donate_link, flattr_donate_link, tippin_donate_link, bitcoin_address, dogecoin_address, pix_key, time_elapsed)
self.log(success_message)
def _merge_ocr(self, image_pdf_file_path, text_pdf_file_path, result_pdf_file_path, tag):
# Merge OCR background PDF into the main PDF document making a PDF sandwich
self.debug("Merging with OCR")
if self.path_qpdf is not None:
try:
with open(image_pdf_file_path, "rb") as img_f:
img_data = PyPDF2.PdfFileReader(img_f, strict=False)
first_page_img_rect = img_data.getPage(0).mediaBox
first_page_img_area = first_page_img_rect.getWidth() * first_page_img_rect.getHeight()
except PdfReadError:
eprint("Warning: could not read input file page geometry. Merge may fail, please check input file.")
first_page_img_area = 0
with open(text_pdf_file_path, "rb") as txt_f:
txt_data = PyPDF2.PdfFileReader(txt_f, strict=False)
first_page_txt_rect = txt_data.getPage(0).mediaBox
first_page_txt_area = first_page_txt_rect.getWidth() * first_page_txt_rect.getHeight()
#
# Define overlay / underlay based on biggest page
if first_page_txt_area < first_page_img_area:
qpdf_command = [self.path_qpdf, "--underlay", image_pdf_file_path, "--", text_pdf_file_path, result_pdf_file_path]
else:
qpdf_command = [self.path_qpdf, "--overlay", text_pdf_file_path, "--", image_pdf_file_path, result_pdf_file_path]
#
pqpdf = subprocess.Popen(
qpdf_command,
stdout=subprocess.DEVNULL,
stderr=open(self.tmp_dir + "err_merge-qpdf-{0}-{1}.log".format(self.prefix, tag), "wb"),
shell=self.shell_mode)
pqpdf.wait()
else:
pmulti = subprocess.Popen(
[self.path_this_python, self.script_dir + 'pdf2pdfocr_multibackground.py',
image_pdf_file_path, text_pdf_file_path, result_pdf_file_path],
stdout=subprocess.DEVNULL,
stderr=open(self.tmp_dir + "err_merge-multiback-{0}-{1}.log".format(self.prefix, tag), "wb"),
shell=self.shell_mode)
pmulti.wait()
def build_final_output(self):
# Start building final PDF.
# First, should we rebuild source file?
rebuild_pdf_from_images = False
if self.input_file_is_encrypted or self.input_file_type != "application/pdf" or self.use_deskew_mode:
rebuild_pdf_from_images = True
#
if (not rebuild_pdf_from_images) and (not self.force_rebuild_mode):
if not self.ocr_ignored:
self._merge_ocr(self.input_file, (self.tmp_dir + self.prefix + "-ocr.pdf"), (self.tmp_dir + self.prefix + "-OUTPUT.pdf"),
"final-output")
#
# Try to handle failure.
# The code below tries to rewrite the source PDF and then merges again.
if not os.path.isfile(self.tmp_dir + self.prefix + "-OUTPUT.pdf"):
self.try_repair_input_and_merge()
else:
# OCR ignored
shutil.copyfile(self.input_file, (self.tmp_dir + self.prefix + "-OUTPUT.pdf"))
else:
self.rebuild_and_merge()
#
if not os.path.isfile(self.tmp_dir + self.prefix + "-OUTPUT.pdf"):
eprint("Output file could not be created :( Exiting with error code.")
self.cleanup()
sys.exit(1)
def rebuild_and_merge(self):
eprint("Warning: metadata wiped from final PDF file (original file is not an unprotected PDF / "
"forcing rebuild from extracted images / using deskew)")
# Convert presets
# Please read http://www.imagemagick.org/Usage/quantize/#colors_two
preset_fast = "-threshold 60% -compress Group4"
preset_best = "-colors 2 -colorspace gray -normalize -threshold 60% -compress Group4"
preset_grayscale = "-threshold 85% -morphology Dilate Diamond -compress Group4"
preset_jpeg = "-strip -interlace Plane -gaussian-blur 0.05 -quality 50% -compress JPEG"
preset_jpeg2000 = "-quality 32% -compress JPEG2000"
#
rebuild_list = sorted(glob.glob(self.tmp_dir + self.prefix + "*." + self.extension_images))
#
if self.user_convert_params == "smart":
checkimg_pool_map = self.main_pool.starmap_async(do_check_img_greyscale, zip(rebuild_list))
checkimg_wait_rounds = 0
while not checkimg_pool_map.ready() and (self.main_pool is not None):
checkimg_wait_rounds += 1
if checkimg_wait_rounds % 10 == 0:
self.log("Checking page colors...")
time.sleep(0.5)
result_check_img = checkimg_pool_map.get()
if all(result_check_img):
self.log("No color pages detected. Smart mode will use 'best' preset.")
self.user_convert_params = "best"
else:
self.log("Color pages detected. Smart mode will use 'jpeg' preset.")
self.user_convert_params = "jpeg"
#
if self.user_convert_params == "fast":
convert_params = preset_fast
elif self.user_convert_params == "best":
convert_params = preset_best
elif self.user_convert_params == "grayscale":
convert_params = preset_grayscale
elif self.user_convert_params == "jpeg":
convert_params = preset_jpeg
elif self.user_convert_params == "jpeg2000":
convert_params = preset_jpeg2000
else:
convert_params = self.user_convert_params
# Handle default case
if convert_params == "":
convert_params = preset_best
#
self.log("Rebuilding PDF from images")
rebuild_pool_map = self.main_pool.starmap_async(do_rebuild,
zip(rebuild_list,
itertools.repeat(self.path_convert),
itertools.repeat(convert_params),
itertools.repeat(self.tmp_dir),
itertools.repeat(self.shell_mode)))
rebuild_wait_rounds = 0
while not rebuild_pool_map.ready() and (self.main_pool is not None):
rebuild_wait_rounds += 1
pages_processed = len(glob.glob(self.tmp_dir + "REBUILD_" + self.prefix + "*.pdf"))
if rebuild_wait_rounds % 10 == 0:
self.log("Waiting for PDF rebuild to complete. {0}/{1} pages completed...".format(pages_processed, self.input_file_number_of_pages))
time.sleep(0.5)
#
rebuilt_pdf_file_list = sorted(glob.glob(self.tmp_dir + "REBUILD_{0}*.pdf".format(self.prefix)))
self.debug("We have {0} rebuilt PDF files".format(len(rebuilt_pdf_file_list)))
if len(rebuilt_pdf_file_list) > 0:
pdf_merger = PyPDF2.PdfFileMerger()
for rebuilt_pdf_file in rebuilt_pdf_file_list:
pdf_merger.append(PyPDF2.PdfFileReader(rebuilt_pdf_file, strict=False))
pdf_merger.write(self.tmp_dir + self.prefix + "-input_unprotected.pdf")
pdf_merger.close()
else:
eprint("No PDF files generated after image rebuilding. This is not expected. Aborting.")
self.cleanup()
sys.exit(1)
self.debug("PDF rebuilding completed")
#
if not self.ocr_ignored:
self._merge_ocr((self.tmp_dir + self.prefix + "-input_unprotected.pdf"),
(self.tmp_dir + self.prefix + "-ocr.pdf"),
(self.tmp_dir + self.prefix + "-OUTPUT.pdf"), "rebuild-merge")
else:
shutil.copyfile((self.tmp_dir + self.prefix + "-input_unprotected.pdf"), (self.tmp_dir + self.prefix + "-OUTPUT.pdf"))
def try_repair_input_and_merge(self):
self.debug("Fail to merge source PDF with extracted OCR text. Trying to fix source PDF to build final file...")
prepair1 = subprocess.Popen(
[self.path_pdf2ps, self.input_file, self.tmp_dir + self.prefix + "-fixPDF.ps"],
stdout=subprocess.DEVNULL,
stderr=open(self.tmp_dir + "err_pdf2ps-{0}.log".format(self.prefix), "wb"),
shell=self.shell_mode)
prepair1.wait()
prepair2 = subprocess.Popen([self.path_ps2pdf, self.tmp_dir + self.prefix + "-fixPDF.ps",
self.tmp_dir + self.prefix + "-fixPDF.pdf"],
stdout=subprocess.DEVNULL,
stderr=open(self.tmp_dir + "err_ps2pdf-{0}.log".format(self.prefix),
"wb"), shell=self.shell_mode)
prepair2.wait()
#
self._merge_ocr((self.tmp_dir + self.prefix + "-fixPDF.pdf"),
(self.tmp_dir + self.prefix + "-ocr.pdf"),
(self.tmp_dir + self.prefix + "-OUTPUT.pdf"), "repair_input")
def create_text_output(self):
# Create final text output
if self.create_text_mode:
text_files = sorted(glob.glob(self.tmp_dir + self.prefix + "*.txt"))
text_io_wrapper = open(self.output_file_text, 'wb')
with text_io_wrapper as outfile:
for fname in text_files:
with open(fname, 'rb') as infile:
outfile.write(infile.read())
#
text_io_wrapper.close()
#
self.log("Created final text file")
def join_ocred_pdf(self):
# Join PDF files into one file that contains all OCR "backgrounds"
text_pdf_file_list = sorted(glob.glob(self.tmp_dir + "{0}*.{1}".format(self.prefix, "pdf")))
self.debug("We have {0} ocr'ed files".format(len(text_pdf_file_list)))
if len(text_pdf_file_list) > 0:
pdf_merger = PyPDF2.PdfFileMerger()
for text_pdf_file in text_pdf_file_list:
pdf_merger.append(PyPDF2.PdfFileReader(text_pdf_file, strict=False))
pdf_merger.write(self.tmp_dir + self.prefix + "-ocr.pdf")
pdf_merger.close()
else:
eprint("No PDF files generated after OCR. This is not expected. Aborting.")
self.cleanup()
sys.exit(1)
#
self.debug("Joined ocr'ed PDF files")
def external_ocr(self, image_file_list):
if self.ocr_engine in ["cuneiform", "tesseract"]:
self.log("Starting OCR with {0}...".format(self.ocr_engine))
if self.ocr_engine == "cuneiform":
ocr_pool_map = self.main_pool.starmap_async(do_ocr_cuneiform,
zip(image_file_list,
itertools.repeat(self.extra_ocr_flag),
itertools.repeat(self.tess_langs),
itertools.repeat(self.tmp_dir),
itertools.repeat(self.shell_mode),
itertools.repeat(self.path_cuneiform)))
elif self.ocr_engine == "tesseract":
ocr_pool_map = self.main_pool.starmap_async(do_ocr_tesseract,
zip(image_file_list,
itertools.repeat(self.extra_ocr_flag),
itertools.repeat(self.tess_langs),
itertools.repeat(self.tess_psm),
itertools.repeat(self.tmp_dir),
itertools.repeat(self.shell_mode),
itertools.repeat(self.path_tesseract),
itertools.repeat(self.text_generation_strategy),
itertools.repeat(self.delete_temps),
itertools.repeat(self.tesseract_can_textonly_pdf)))
else:
ocr_pool_map = None # Should never happen
#
ocr_rounds = 0
while not ocr_pool_map.ready() and (self.main_pool is not None):
ocr_rounds += 1
pages_processed = len(glob.glob(self.tmp_dir + self.prefix + "*.tmp"))
if ocr_rounds % 10 == 0:
self.log("Waiting for OCR to complete. {0}/{1} pages completed...".format(pages_processed, self.input_file_number_of_pages))
time.sleep(0.5)
#
self.log("OCR completed")
self.ocr_ignored = False
else:
self.log("OCR ignored")
self.ocr_ignored = True
def autorotate_info(self, image_file_list):
if self.use_autorotate:
self.debug("Calculating autorotate values...")
autorotate_pool_map = self.main_pool.starmap_async(do_autorotate_info,
zip(image_file_list,
itertools.repeat(self.shell_mode),
itertools.repeat(self.tmp_dir),
itertools.repeat(self.tess_langs),
itertools.repeat(self.path_tesseract),
itertools.repeat(self.tesseract_version)))
autorotate_rounds = 0
while not autorotate_pool_map.ready() and (self.main_pool is not None):
autorotate_rounds += 1
pages_processed = len(glob.glob(self.tmp_dir + self.prefix + "*.osd"))
if autorotate_rounds % 10 == 0:
self.log("Waiting for autorotate. {0}/{1} pages completed...".format(pages_processed, self.input_file_number_of_pages))
time.sleep(0.5)
#
def autorotate_final_output(self):
param_source_file = self.tmp_dir + self.prefix + "-OUTPUT.pdf"
param_dest_file = self.tmp_dir + self.prefix + "-OUTPUT-ROTATED.pdf"
# method "autorotate_info" generated these OSD files
list_osd = sorted(glob.glob(self.tmp_dir + "{0}*.{1}".format(self.prefix, "osd")))
skip_autorotate = False
if self.use_autorotate and (len(list_osd) != self.input_file_number_of_pages):
eprint("Skipping autorotation because OSD files were not correctly generated. Check input file and "
"tesseract logs")
skip_autorotate = True
#
if self.use_autorotate and not skip_autorotate:
self.debug("Autorotate final output")
file_source = open(param_source_file, 'rb')
pre_output_pdf = PyPDF2.PdfFileReader(file_source, strict=False)
final_output_pdf = PyPDF2.PdfFileWriter()
rotation_angles = []
osd_page_num = 0
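            # Each OSD file contains plain "key: value" lines from tesseract's orientation
            # detection (for example a line such as "Rotate: 90" - illustrative, not from a real run).
            # ConfigParser needs a section header, hence the dummy "[root]" prepended below.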
for osd_information_file in list_osd:
with open(osd_information_file, 'r') as f:
osd_information_string = '[root]\n' + f.read() # A dummy section to satisfy ConfigParser
f.close()
osd_page_num += 1
config_osd = configparser.ConfigParser()
config_osd.read_file(io.StringIO(osd_information_string))
try:
rotate_value = config_osd.getint('root', 'Rotate')
except configparser.NoOptionError:
eprint("Error reading rotate page value from page {0}. Assuming zero as rotation angle.".format(
osd_page_num))
rotate_value = 0
rotation_angles.append(rotate_value)
#
for i in range(pre_output_pdf.getNumPages()):
page = pre_output_pdf.getPage(i)
page.rotateClockwise(rotation_angles[i])
final_output_pdf.addPage(page)
#
with open(param_dest_file, 'wb') as f:
final_output_pdf.write(f)
f.close()
#
file_source.close()
else:
            # No autorotate: just rename the file so the next step processes it correctly
self.debug("Autorotate skipped")
os.rename(param_source_file, param_dest_file)
def deskew(self, image_file_list):
if self.use_deskew_mode:
self.debug("Applying deskew (will rebuild final PDF file)")
deskew_pool_map = self.main_pool.starmap_async(do_deskew, zip(image_file_list, itertools.repeat(self.deskew_threshold),
itertools.repeat(self.shell_mode), itertools.repeat(self.path_mogrify)))
deskew_wait_rounds = 0
while not deskew_pool_map.ready() and (self.main_pool is not None):
deskew_wait_rounds += 1
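                # Progress is estimated via the pool's private MapResult._value list (a CPython
                # implementation detail): finished tasks replace their None placeholders.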
pages_processed = len([x for x in deskew_pool_map._value if x is not None])
if deskew_wait_rounds % 10 == 0:
self.log("Waiting for deskew to complete. {0}/{1} pages completed...".format(pages_processed, self.input_file_number_of_pages))
time.sleep(0.5)
def convert_input_to_images(self):
self.log("Converting input file to images...")
if self.input_file_type == "application/pdf":
parallel_page_ranges = self.calculate_ranges()
if parallel_page_ranges is not None:
# TODO - try to use method inside this class (encapsulate do_pdftoimage)
do_pdftoimage_result_codes = self.main_pool.starmap(do_pdftoimage, zip(itertools.repeat(self.path_pdftoppm),
parallel_page_ranges,
itertools.repeat(self.input_file),
itertools.repeat(self.image_resolution),
itertools.repeat(self.tmp_dir),
itertools.repeat(self.prefix),
itertools.repeat(self.shell_mode)))
else:
                # Without page info, the only alternative is to go sequentially (without ranges)
do_pdftoimage_result_code = do_pdftoimage(self.path_pdftoppm, None, self.input_file, self.image_resolution, self.tmp_dir,
self.prefix, self.shell_mode)
do_pdftoimage_result_codes = [do_pdftoimage_result_code]
#
if not all(ret_code == 0 for ret_code in do_pdftoimage_result_codes):
eprint("Fail to create images from PDF. Exiting.")
self.cleanup()
sys.exit(1)
else:
if self.input_file_type in ["image/tiff", "image/jpeg", "image/png"]:
# %09d to format files for correct sort
p = subprocess.Popen([self.path_convert, self.input_file, '-quality', '100', '-scene', '1',
self.tmp_dir + self.prefix + '-%09d.' + self.extension_images],
shell=self.shell_mode)
p.wait()
else:
eprint("{0} is not supported in this script. Exiting.".format(self.input_file_type))
self.cleanup()
sys.exit(1)
def initial_cleanup(self):
Pdf2PdfOcr.best_effort_remove(self.output_file)
if self.create_text_mode:
Pdf2PdfOcr.best_effort_remove(self.output_file_text)
def define_output_files(self):
if self.force_out_file_mode:
self.output_file = self.force_out_file
else:
if self.force_out_dir_mode:
output_dir = os.path.abspath(self.force_out_dir)
else:
output_dir = os.path.dirname(self.input_file)
output_name_no_ext = os.path.splitext(os.path.basename(self.input_file))[0]
self.output_file = output_dir + os.path.sep + output_name_no_ext + "-OCR.pdf"
#
self.output_file_text = self.output_file + ".txt"
self.debug("Output file: {0} for PDF and {1} for TXT".format(self.output_file, self.output_file_text))
if (self.safe_mode and os.path.isfile(self.output_file)) or \
(self.safe_mode and self.create_text_mode and os.path.isfile(self.output_file_text)):
if os.path.isfile(self.output_file):
eprint("{0} already exists and safe mode is enabled. Exiting.".format(self.output_file))
if self.create_text_mode and os.path.isfile(self.output_file_text):
eprint("{0} already exists and safe mode is enabled. Exiting.".format(self.output_file_text))
self.cleanup()
sys.exit(1)
def validate_pdf_input_file(self):
try:
pdf_file_obj = open(self.input_file, 'rb')
pdf_reader = PyPDF2.PdfFileReader(pdf_file_obj, strict=False)
except PdfReadError:
eprint("Corrupted PDF file detected. Aborting...")
self.cleanup()
sys.exit(1)
#
try:
self.input_file_number_of_pages = pdf_reader.getNumPages()
except Exception:
eprint("Warning: could not read input file number of pages.")
self.input_file_number_of_pages = None # Will be calculated later based on number of image files to process
#
self.check_avoid_high_pages()
#
self.input_file_is_encrypted = pdf_reader.isEncrypted
if not self.input_file_is_encrypted:
self.input_file_metadata = pdf_reader.documentInfo
#
if self.check_text_mode:
self.input_file_has_text = self.check_for_text()
#
if self.input_file_type == "application/pdf" and self.check_text_mode and self.input_file_has_text:
eprint("{0} already has text and check text mode is enabled. Exiting.".format(self.input_file))
self.cleanup()
sys.exit(1)
#
if self.input_file_type == "application/pdf" and self.check_protection_mode and self.input_file_is_encrypted:
eprint("{0} is encrypted PDF and check encryption mode is enabled. Exiting.".format(self.input_file))
self.cleanup()
sys.exit(1)
def check_avoid_high_pages(self):
if self.input_file_number_of_pages is not None and self.avoid_high_pages_mode \
and self.input_file_number_of_pages > self.avoid_high_pages_pages:
eprint("Input file has {0} pages and maximum for process in avoid high number of pages mode (-b) is {1}. "
"Exiting.".format(self.input_file_number_of_pages, self.avoid_high_pages_pages))
self.cleanup()
sys.exit(1)
def check_avoid_file_by_size(self):
if self.avoid_small_file_mode:
input_file_size_kb = os.path.getsize(self.input_file) / 1024
if input_file_size_kb < self.avoid_small_file_limit_kb:
eprint("Input file has {0:.2f} KBytes and minimum size to process (--min-kbytes) is {1:.2f} KBytes. "
"Exiting.".format(input_file_size_kb, self.avoid_small_file_limit_kb))
self.cleanup()
sys.exit(1)
def check_for_text(self):
"""Check if input file contains text. Actually based on pdffonts from poppler"""
ptext = subprocess.Popen([self.path_pdffonts, self.input_file], stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL, shell=self.shell_mode)
ptext_output, ptext_errors = ptext.communicate()
ptext.wait()
pdffonts_text_output_lines = ptext_output.decode("utf-8").strip().splitlines()
        # Output without fonts has exactly 2 header lines.
        # Any output with more than 2 lines means the file contains at least one font (i.e. text).
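        # Illustrative pdffonts output for a file without fonts (header lines only):
        #   name                type        encoding    emb sub uni object ID
        #   ------------------- ----------- ----------- --- --- --- ---------
        # Every additional line corresponds to an embedded font, i.e. selectable text.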
if len(pdffonts_text_output_lines) > 2:
return True
else:
return False
def detect_file_type(self):
"""Detect mime type of input file"""
pfile = subprocess.Popen([self.path_file, '-b', '--mime-type', self.input_file], stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL, shell=self.shell_mode)
pfile_output, pfile_errors = pfile.communicate()
pfile.wait()
self.input_file_type = pfile_output.decode("utf-8").strip()
self.log("Input file {0}: type is {1}".format(self.input_file, self.input_file_type))
def test_convert(self):
"""
test convert command to check if it's ImageMagick
:return: True if it's ImageMagicks convert, false with any other case or error
"""
try:
result = False
test_image = self.tmp_dir + "converttest-" + self.prefix + ".jpg"
ptest = subprocess.Popen([self.path_convert, 'rose:', test_image], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
shell=self.shell_mode)
ptest.wait()
return_code = ptest.returncode
if (return_code == 0) and (os.path.isfile(test_image)):
Pdf2PdfOcr.best_effort_remove(test_image)
result = True
return result
except Exception:
self.log("Error testing convert utility. Assuming there is no 'convert' available...")
return False
def test_tesseract_textonly_pdf(self):
result = False
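        # "tesseract --print-parameters" lists one parameter per line with its default value and
        # description; "textonly_pdf" shows up there in builds that can emit an invisible-text-only
        # PDF (roughly Tesseract 3.05+ / 4.x - a hedged assumption, hence the graceful fallback below).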
try:
result = ('textonly_pdf' in subprocess.check_output([self.path_tesseract, '--print-parameters'], universal_newlines=True))
except Exception:
self.log("Error checking tesseract capabilities. Trying to continue without 'textonly_pdf' in Tesseract")
#
self.debug("Tesseract can 'textonly_pdf': {0}".format(result))
return result
def get_tesseract_version(self):
# Inspired by the great lib 'pytesseract' - https://github.com/madmaze/pytesseract/blob/master/src/pytesseract.py
try:
version_info = subprocess.check_output([self.path_tesseract, '--version'], stderr=subprocess.STDOUT).decode('utf-8').split()
# self.debug("Tesseract full version info: {0}".format(version_info))
version_info = version_info[1].lstrip(string.printable[10:])
l_version_info = LooseVersion(version_info)
result = int(l_version_info.version[0])
self.debug("Tesseract version: {0}".format(result))
return result
except Exception as e:
self.log("Error checking tesseract version. Trying to continue assuming legacy version 3. Exception was {0}".format(e))
return 3
def get_qpdf_version(self):
try:
version_info = subprocess.check_output([self.path_qpdf, '--version'], stderr=subprocess.STDOUT).decode('utf-8').split()
version_info = version_info[2]
l_version_info = LooseVersion(version_info)
self.debug("Qpdf version: {0}".format(l_version_info))
return l_version_info
except Exception as e:
legacy_version = "8.4.0"
self.log("Error checking qpdf version. Trying to continue assuming legacy version {0}. Exception was {1}".format(legacy_version, e))
return LooseVersion(legacy_version)
def get_pdftoppm_version(self):
try:
version_info = subprocess.check_output([self.path_pdftoppm, '-v'], stderr=subprocess.STDOUT).decode('utf-8').split()
version_info = version_info[2]
l_version_info = LooseVersion(version_info)
self.debug("Pdftoppm version: {0}".format(l_version_info))
return l_version_info
except Exception as e:
legacy_version = "0.70.0"
self.log("Error checking pdftoppm version. Trying to continue assuming legacy version {0}. Exception was {1}".format(legacy_version, e))
return LooseVersion(legacy_version)
def calculate_ranges(self):
"""
calculate ranges to run pdftoppm in parallel. Each CPU available will run well defined page range
:return:
"""
if (self.input_file_number_of_pages is None) or (self.input_file_number_of_pages < 20): # 20 to avoid unnecessary parallel operation
return None
#
range_size = math.ceil(self.input_file_number_of_pages / self.cpu_to_use)
number_of_ranges = math.ceil(self.input_file_number_of_pages / range_size)
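        # Hedged worked example: 50 pages on 4 CPUs gives range_size = ceil(50 / 4) = 13 and
        # ranges [(1, 13), (14, 26), (27, 39), (40, 50)], which together cover every page exactly once.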
result = []
for i in range(0, number_of_ranges):
range_start = (range_size * i) + 1
range_end = (range_size * i) + range_size
# Handle last range
if range_end > self.input_file_number_of_pages:
range_end = self.input_file_number_of_pages
result.append((range_start, range_end))
# Check result
check_pages = 0
for created_range in result:
check_pages += (created_range[1] - created_range[0]) + 1
if check_pages != self.input_file_number_of_pages:
raise ArithmeticError("Please check 'calculate_ranges' function, something is wrong...")
#
return result
def edit_producer(self):
self.debug("Editing producer")
param_source_file = self.tmp_dir + self.prefix + "-OUTPUT-ROTATED.pdf"
file_source = open(param_source_file, 'rb')
pre_output_pdf = PyPDF2.PdfFileReader(file_source, strict=False)
final_output_pdf = PyPDF2.PdfFileWriter()
for i in range(pre_output_pdf.getNumPages()):
page = pre_output_pdf.getPage(i)
final_output_pdf.addPage(page)
info_dict_output = dict()
# Our signature as a producer
our_name = "PDF2PDFOCR(github.com/LeoFCardoso/pdf2pdfocr)"
read_producer = False
producer_key = "/Producer"
if self.input_file_metadata is not None:
for key in self.input_file_metadata:
value = self.input_file_metadata[key]
if key == producer_key:
if type(value) == ByteStringObject:
value = str(value, errors="ignore")
value = "".join(filter(lambda x: x in string.printable, value)) # Try to remove unprintable
value = value + "; " + our_name
read_producer = True
#
try:
# Check if value can be accepted by pypdf API
PyPDF2.generic.createStringObject(value)
info_dict_output[key] = value
except TypeError:
# This can happen with some array properties.
eprint("Warning: property " + key + " not copied to final PDF")
#
if not read_producer:
info_dict_output[producer_key] = our_name
#
final_output_pdf.addMetadata(info_dict_output)
#
with open(self.output_file, 'wb') as f:
final_output_pdf.write(f)
f.close()
#
file_source.close()
@staticmethod
def best_effort_remove(filename):
try:
os.remove(filename)
except OSError as e:
if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory
                raise  # re-raise the exception if a different error occurred
# To be used on signal handling
def sigint_handler(signum, frame):
global pdf2ocr
pdf2ocr.cleanup()
sys.exit(1)
# -------------
# MAIN
# -------------
if __name__ == '__main__':
# https://docs.python.org/3/library/multiprocessing.html#multiprocessing-programming
# See "Safe importing of main module"
multiprocessing.freeze_support() # Should make effect only on non-fork systems (Windows)
#
# From tesseract docs:
# "If the tesseract executable was built with multithreading support, it will normally use four CPU cores for the OCR process. While this can be
# faster for a single image, it gives bad performance if the host computer provides less than four CPU cores or if OCR is made for many images.
# Only a single CPU core is used with OMP_THREAD_LIMIT=1"
# As we control number of parallel executions, set this env var for the entire script.
os.environ['OMP_THREAD_LIMIT'] = '1'
#
# Arguments
parser = argparse.ArgumentParser(
description=('pdf2pdfocr.py [https://github.com/LeoFCardoso/pdf2pdfocr] version %s (http://semver.org/lang/pt-BR/)' % VERSION),
formatter_class=argparse.RawTextHelpFormatter)
requiredNamed = parser.add_argument_group('required arguments')
requiredNamed.add_argument("-i", dest="input_file", action="store", required=True,
help="path for input file")
#
parser.add_argument("-c", dest="ocr_engine", action="store", default="tesseract", type=str,
help="specify OCR engine (tesseract, cuneiform, no_ocr). "
"Use no_ocr to skip OCR (for example, to test -f/-g configurations). "
"Default: tesseract")
parser.add_argument("-s", dest="safe_mode", action="store_true", default=False,
help="safe mode. Does not overwrite output [PDF | TXT] OCR file")
parser.add_argument("-t", dest="check_text_mode", action="store_true", default=False,
help="check text mode. Does not process if source PDF already has text")
parser.add_argument("-a", dest="check_protection_mode", action="store_true", default=False,
help="check encryption mode. Does not process if source PDF is protected")
parser.add_argument("-b", dest="max_pages", action="store", default=None, type=int,
help="avoid high number of pages mode. Does not process if number of pages is greater "
"than <MAX_PAGES>")
parser.add_argument("--min-kbytes", dest="min_kbytes", action="store", default=None, type=int,
help="avoid small files. Does not process if size of input file is lower than <min-kbytes>")
parser.add_argument("-f", dest="force_rebuild_mode", action="store_true", default=False,
help="force PDF rebuild from extracted images")
    # Escape % with %%
option_g_help = """with image input or '-f', use presets or force parameters when calling 'convert' to build the final PDF file
Examples:
-g fast -> a fast bitonal file ("-threshold 60%% -compress Group4")
-g best -> best quality, but bigger bitonal file ("-colors 2 -colorspace gray -normalize -threshold 60%% -compress Group4")
-g grayscale -> good bitonal file from grayscale documents ("-threshold 85%% -morphology Dilate Diamond -compress Group4")
-g jpeg -> keep original color image as JPEG ("-strip -interlace Plane -gaussian-blur 0.05 -quality 50%% -compress JPEG")
-g jpeg2000 -> keep original color image as JPEG2000 ("-quality 32%% -compress JPEG2000")
-g smart -> try to autodetect colors and use 'jpeg' preset if one color page is detected, otherwise use preset 'best'
-g="-threshold 60%% -compress Group4" -> direct apply these parameters (DON'T FORGET TO USE EQUAL SIGN AND QUOTATION MARKS)
Note, without -g, preset 'best' is used"""
parser.add_argument("-g", dest="convert_params", action="store", default="",
help=option_g_help)
parser.add_argument("-d", dest="deskew_percent", action="store",
help="use imagemagick deskew *before* OCR. <DESKEW_PERCENT> should be a percent, e.g. '40%%'")
parser.add_argument("-u", dest="autorotate", action="store_true", default=False,
help="try to autorotate pages using 'psm 0' feature [tesseract only]")
parser.add_argument("-j", dest="parallel_percent", action="store", type=percentual_float,
help="run this percentual jobs in parallel (0 - 1.0] - multiply with the number of CPU cores"
" (default = 1 [all cores])")
parser.add_argument("-w", dest="create_text_mode", action="store_true", default=False,
help="also create a text file at same location of PDF OCR file [tesseract only]")
parser.add_argument("-o", dest="output_file", action="store", required=False,
help="path for output file")
parser.add_argument("-O", dest="output_dir", action="store", required=False,
help="path for output directory")
parser.add_argument("-p", dest="no_effect_01", action="store_true", default=False,
help="no effect, do not use (reverse compatibility)")
parser.add_argument("-r", dest="image_resolution", action="store", default=300, type=int,
help="specify image resolution in DPI before OCR operation - lower is faster, higher "
"improves OCR quality (default is for quality = 300)")
parser.add_argument("-e", dest="text_generation_strategy", action="store", default="tesseract", type=str,
help="specify how text is generated in final pdf file (tesseract, native) [tesseract only]. Default: tesseract")
parser.add_argument("-l", dest="tess_langs", action="store", required=False,
help="force tesseract or cuneiform to use specific language (default: por+eng)")
parser.add_argument("-m", dest="tess_psm", action="store", required=False,
help="force tesseract to use OCR with specific \"pagesegmode\" (default: tesseract "
"OCR default = 1) [tesseract only]. Use with caution")
parser.add_argument("-x", dest="extra_ocr_flag", action="store", required=False,
help="add extra command line flags in select OCR engine for all pages. Use with caution")
parser.add_argument("--timeout", dest="timeout", action="store", default=None, type=int,
help="run with time limit in seconds")
parser.add_argument("-k", dest="keep_temps", action="store_true", default=False,
help="keep temporary files for debug")
parser.add_argument("-v", dest="verbose_mode", action="store_true", default=False,
help="enable verbose mode")
parser.add_argument("-P", dest="pause_end_mode", action="store_true", default=False,
help="with successful execution, wait for user to press <Enter> at the final of the "
"script (default: not wait)")
# Dummy to be called by gooey (GUI)
parser.add_argument("--ignore-gooey", action="store_true", required=False, default=False)
#
pdf2ocr_args = parser.parse_args()
#
pdf2ocr = Pdf2PdfOcr(pdf2ocr_args)
#
signal.signal(signal.SIGINT, sigint_handler)
#
if pdf2ocr_args.timeout:
#
# https://stackoverflow.com/questions/56305195/is-it-possible-to-specify-the-max-amount-of-time-to-wait-for-code-to-run-with-py/56305465
with futures.ThreadPoolExecutor(max_workers=1) as executor:
future_pdf2ocr = executor.submit(pdf2ocr.ocr)
try:
future_pdf2ocr.result(pdf2ocr_args.timeout)
except futures.TimeoutError as fte:
#
# https://stackoverflow.com/questions/48350257/how-to-exit-a-script-after-threadpoolexecutor-has-timed-out
import atexit
atexit.unregister(futures.thread._python_exit)
executor.shutdown = lambda wait: None
#
pdf2ocr.cleanup()
eprint("Script stopped due to timeout of {0} seconds".format(pdf2ocr_args.timeout))
sys.exit(1)
else:
pdf2ocr.ocr()
#
if pdf2ocr_args.pause_end_mode:
input("Press <Enter> to continue...")
#
sys.exit(0)
#
# This is the end
|
LeoFCardoso/pdf2pdfocr
|
pdf2pdfocr.py
|
Python
|
apache-2.0
| 73,585
|
[
"Gaussian"
] |
c26036c08232b2594b63afe97f74a5c133d53ffd32be83cf650b2375149eabd4
|
"""cursor_handler.py - Cursor handler."""
import gobject
import gtk
from mcomix import constants
class CursorHandler:
def __init__(self, window):
self._window = window
self._timer_id = None
self._auto_hide = False
self._current_cursor = constants.NORMAL_CURSOR
def set_cursor_type(self, cursor):
"""Set the cursor to type <cursor>. Supported cursor types are
available as constants in this module. If <cursor> is not one of the
cursor constants above, it must be a gtk.gdk.Cursor.
"""
if cursor == constants.NORMAL_CURSOR:
mode = None
elif cursor == constants.GRAB_CURSOR:
mode = gtk.gdk.Cursor(gtk.gdk.FLEUR)
elif cursor == constants.WAIT_CURSOR:
mode = gtk.gdk.Cursor(gtk.gdk.WATCH)
elif cursor == constants.NO_CURSOR:
mode = self._get_hidden_cursor()
else:
mode = cursor
self._window.set_cursor(mode)
self._current_cursor = cursor
if self._auto_hide:
if cursor == constants.NORMAL_CURSOR:
self._set_hide_timer()
else:
self._kill_timer()
def auto_hide_on(self):
"""Signal that the cursor should auto-hide from now on (e.g. that
we are entering fullscreen).
"""
self._auto_hide = True
if self._current_cursor == constants.NORMAL_CURSOR:
self._set_hide_timer()
def auto_hide_off(self):
"""Signal that the cursor should *not* auto-hide from now on."""
self._auto_hide = False
self._kill_timer()
if self._current_cursor == constants.NORMAL_CURSOR:
self.set_cursor_type(constants.NORMAL_CURSOR)
def refresh(self):
"""Refresh the current cursor (i.e. display it and set a new timer in
fullscreen). Used when we move the cursor.
"""
if self._auto_hide:
self.set_cursor_type(self._current_cursor)
def _set_hide_timer(self):
self._kill_timer()
self._timer_id = gobject.timeout_add(2000, self._window.set_cursor,
self._get_hidden_cursor())
    def _kill_timer(self):
        if self._timer_id is not None:
            gobject.source_remove(self._timer_id)
            self._timer_id = None
def _get_hidden_cursor(self):
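        # Standard PyGTK "invisible cursor" idiom: a 1x1, 1-bit-deep pixmap is used as both the
        # cursor image and its mask, so effectively nothing is drawn for the pointer.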
pixmap = gtk.gdk.Pixmap(None, 1, 1, 1)
color = gtk.gdk.Color()
return gtk.gdk.Cursor(pixmap, pixmap, color, color, 0, 0)
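# Hedged usage sketch (not part of the original module): the owning window is expected to create
# one handler and toggle auto-hide when entering or leaving fullscreen, e.g.
#   handler = CursorHandler(main_window)
#   handler.auto_hide_on()                          # entering fullscreen
#   handler.set_cursor_type(constants.GRAB_CURSOR)  # while dragging
#   handler.auto_hide_off()                         # leaving fullscreen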
# vim: expandtab:sw=4:ts=4
|
mxtthias/mcomix
|
mcomix/cursor_handler.py
|
Python
|
gpl-2.0
| 2,509
|
[
"FLEUR"
] |
b24cb22d078712f30b2fe4a6e756200f769833b4a9f468dcef2aa1e1ae83c34b
|
#!/usr/bin/env python
from setuptools import setup, Extension
from Cython.Distutils import build_ext
from Cython.Build import cythonize
setup(
name='shand',
version='0.1',
description='A pipeline for investigating cospeciation in microbiomes',
scripts=['scripts/shand'],
url='http://github.com/ryneches/Shand',
author='Russell Neches',
author_email='ryneches@ucdavis.edu',
license='BSD',
packages=['shand'],
install_requires=[
'pandas',
'screed',
'hat_trie',
'scikit-bio',
'pyprind',
'psutil',
'cython'
],
zip_safe=False,
ext_modules = cythonize( 'shand/quicktree.pyx' ),
test_suite = 'nose.collector'
)
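# Hedged usage note: with this Cython-based setup, the extension module is typically built in
# place via "python setup.py build_ext --inplace", and the nose suite runs with "python setup.py test".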
|
ryneches/Shand
|
setup.py
|
Python
|
bsd-3-clause
| 717
|
[
"scikit-bio"
] |
537a8375b935879d5164f21b03fbacef1f02a06ee5b1f02949070381c017f5eb
|
"""
This is only meant to add docs to objects defined in C-extension modules.
The purpose is to allow easier editing of the docstrings without
requiring a re-compile.
NOTE: Many of the methods of ndarray have corresponding functions.
If you update these docstrings, please keep also the ones in
core/fromnumeric.py, core/defmatrix.py up-to-date.
"""
from __future__ import division, absolute_import, print_function
from numpy.core import numerictypes as _numerictypes
from numpy.core import dtype
from numpy.core.function_base import add_newdoc
###############################################################################
#
# flatiter
#
# flatiter needs a toplevel description
#
###############################################################################
add_newdoc('numpy.core', 'flatiter',
"""
Flat iterator object to iterate over arrays.
A `flatiter` iterator is returned by ``x.flat`` for any array `x`.
It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in row-major, C-style order (the last
index varying the fastest). The iterator can also be indexed using
basic slicing or advanced indexing.
See Also
--------
ndarray.flat : Return a flat iterator over an array.
ndarray.flatten : Returns a flattened copy of an array.
Notes
-----
A `flatiter` iterator can not be constructed directly from Python code
by calling the `flatiter` constructor.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> type(fl)
<type 'numpy.flatiter'>
>>> for item in fl:
... print(item)
...
0
1
2
3
4
5
>>> fl[2:4]
array([2, 3])
""")
# flatiter attributes
add_newdoc('numpy.core', 'flatiter', ('base',
"""
A reference to the array that is iterated over.
Examples
--------
>>> x = np.arange(5)
>>> fl = x.flat
>>> fl.base is x
True
"""))
add_newdoc('numpy.core', 'flatiter', ('coords',
"""
An N-dimensional tuple of current coordinates.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.coords
(0, 0)
>>> fl.next()
0
>>> fl.coords
(0, 1)
"""))
add_newdoc('numpy.core', 'flatiter', ('index',
"""
Current flat index into the array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.index
0
>>> fl.next()
0
>>> fl.index
1
"""))
# flatiter functions
add_newdoc('numpy.core', 'flatiter', ('__array__',
"""__array__(type=None) Get array from iterator
"""))
add_newdoc('numpy.core', 'flatiter', ('copy',
"""
copy()
Get a copy of the iterator as a 1-D array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> fl = x.flat
>>> fl.copy()
array([0, 1, 2, 3, 4, 5])
"""))
###############################################################################
#
# nditer
#
###############################################################################
add_newdoc('numpy.core', 'nditer',
"""
Efficient multi-dimensional iterator object to iterate over arrays.
To get started using this object, see the
:ref:`introductory guide to array iteration <arrays.nditer>`.
Parameters
----------
op : ndarray or sequence of array_like
The array(s) to iterate over.
flags : sequence of str, optional
Flags to control the behavior of the iterator.
* "buffered" enables buffering when required.
* "c_index" causes a C-order index to be tracked.
* "f_index" causes a Fortran-order index to be tracked.
* "multi_index" causes a multi-index, or a tuple of indices
with one per iteration dimension, to be tracked.
* "common_dtype" causes all the operands to be converted to
a common data type, with copying or buffering as necessary.
* "copy_if_overlap" causes the iterator to determine if read
operands have overlap with write operands, and make temporary
copies as necessary to avoid overlap. False positives (needless
copying) are possible in some cases.
* "delay_bufalloc" delays allocation of the buffers until
a reset() call is made. Allows "allocate" operands to
be initialized before their values are copied into the buffers.
* "external_loop" causes the `values` given to be
one-dimensional arrays with multiple values instead of
zero-dimensional arrays.
* "grow_inner" allows the `value` array sizes to be made
larger than the buffer size when both "buffered" and
"external_loop" is used.
* "ranged" allows the iterator to be restricted to a sub-range
of the iterindex values.
* "refs_ok" enables iteration of reference types, such as
object arrays.
* "reduce_ok" enables iteration of "readwrite" operands
which are broadcasted, also known as reduction operands.
* "zerosize_ok" allows `itersize` to be zero.
op_flags : list of list of str, optional
This is a list of flags for each operand. At minimum, one of
"readonly", "readwrite", or "writeonly" must be specified.
* "readonly" indicates the operand will only be read from.
* "readwrite" indicates the operand will be read from and written to.
* "writeonly" indicates the operand will only be written to.
* "no_broadcast" prevents the operand from being broadcasted.
* "contig" forces the operand data to be contiguous.
* "aligned" forces the operand data to be aligned.
* "nbo" forces the operand data to be in native byte order.
* "copy" allows a temporary read-only copy if required.
* "updateifcopy" allows a temporary read-write copy if required.
* "allocate" causes the array to be allocated if it is None
in the `op` parameter.
* "no_subtype" prevents an "allocate" operand from using a subtype.
* "arraymask" indicates that this operand is the mask to use
for selecting elements when writing to operands with the
'writemasked' flag set. The iterator does not enforce this,
but when writing from a buffer back to the array, it only
copies those elements indicated by this mask.
* 'writemasked' indicates that only elements where the chosen
'arraymask' operand is True will be written to.
* "overlap_assume_elementwise" can be used to mark operands that are
accessed only in the iterator order, to allow less conservative
copying when "copy_if_overlap" is present.
op_dtypes : dtype or tuple of dtype(s), optional
The required data type(s) of the operands. If copying or buffering
is enabled, the data will be converted to/from their original types.
order : {'C', 'F', 'A', 'K'}, optional
Controls the iteration order. 'C' means C order, 'F' means
Fortran order, 'A' means 'F' order if all the arrays are Fortran
contiguous, 'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible. This also
affects the element memory order of "allocate" operands, as they
are allocated to be compatible with iteration order.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when making a copy
or buffering. Setting this to 'unsafe' is not recommended,
as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
op_axes : list of list of ints, optional
        If provided, is a list of ints or None for each operand.
The list of axes for an operand is a mapping from the dimensions
of the iterator to the dimensions of the operand. A value of
-1 can be placed for entries, causing that dimension to be
treated as "newaxis".
itershape : tuple of ints, optional
The desired shape of the iterator. This allows "allocate" operands
with a dimension mapped by op_axes not corresponding to a dimension
of a different operand to get a value not equal to 1 for that
dimension.
buffersize : int, optional
When buffering is enabled, controls the size of the temporary
buffers. Set to 0 for the default value.
Attributes
----------
dtypes : tuple of dtype(s)
The data types of the values provided in `value`. This may be
different from the operand data types if buffering is enabled.
Valid only before the iterator is closed.
finished : bool
Whether the iteration over the operands is finished or not.
has_delayed_bufalloc : bool
If True, the iterator was created with the "delay_bufalloc" flag,
and no reset() function was called on it yet.
has_index : bool
If True, the iterator was created with either the "c_index" or
the "f_index" flag, and the property `index` can be used to
retrieve it.
has_multi_index : bool
If True, the iterator was created with the "multi_index" flag,
and the property `multi_index` can be used to retrieve it.
index
When the "c_index" or "f_index" flag was used, this property
provides access to the index. Raises a ValueError if accessed
and `has_index` is False.
iterationneedsapi : bool
Whether iteration requires access to the Python API, for example
if one of the operands is an object array.
iterindex : int
An index which matches the order of iteration.
itersize : int
Size of the iterator.
itviews
Structured view(s) of `operands` in memory, matching the reordered
and optimized iterator access pattern. Valid only before the iterator
is closed.
multi_index
When the "multi_index" flag was used, this property
provides access to the index. Raises a ValueError if accessed
        and `has_multi_index` is False.
ndim : int
The iterator's dimension.
nop : int
The number of iterator operands.
operands : tuple of operand(s)
The array(s) to be iterated over. Valid only before the iterator is
closed.
shape : tuple of ints
Shape tuple, the shape of the iterator.
value
Value of `operands` at current iteration. Normally, this is a
tuple of array scalars, but if the flag "external_loop" is used,
it is a tuple of one dimensional arrays.
Notes
-----
`nditer` supersedes `flatiter`. The iterator implementation behind
`nditer` is also exposed by the NumPy C API.
The Python exposure supplies two iteration interfaces, one which follows
the Python iterator protocol, and another which mirrors the C-style
do-while pattern. The native Python approach is better in most cases, but
if you need the iterator's coordinates or index, use the C-style pattern.
Examples
--------
Here is how we might write an ``iter_add`` function, using the
Python iterator protocol::
def iter_add_py(x, y, out=None):
addop = np.add
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
with it:
for (a, b, c) in it:
addop(a, b, out=c)
return it.operands[2]
Here is the same function, but following the C-style pattern::
def iter_add(x, y, out=None):
addop = np.add
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
with it:
while not it.finished:
addop(it[0], it[1], out=it[2])
it.iternext()
return it.operands[2]
Here is an example outer product function::
def outer_it(x, y, out=None):
mulop = np.multiply
it = np.nditer([x, y, out], ['external_loop'],
[['readonly'], ['readonly'], ['writeonly', 'allocate']],
op_axes=[list(range(x.ndim)) + [-1] * y.ndim,
[-1] * x.ndim + list(range(y.ndim)),
None])
with it:
for (a, b, c) in it:
mulop(a, b, out=c)
return it.operands[2]
>>> a = np.arange(2)+1
>>> b = np.arange(3)+1
>>> outer_it(a,b)
array([[1, 2, 3],
[2, 4, 6]])
Here is an example function which operates like a "lambda" ufunc::
def luf(lamdaexpr, *args, **kwargs):
"luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)"
nargs = len(args)
op = (kwargs.get('out',None),) + args
it = np.nditer(op, ['buffered','external_loop'],
[['writeonly','allocate','no_broadcast']] +
[['readonly','nbo','aligned']]*nargs,
order=kwargs.get('order','K'),
casting=kwargs.get('casting','safe'),
buffersize=kwargs.get('buffersize',0))
while not it.finished:
it[0] = lamdaexpr(*it[1:])
it.iternext()
return it.operands[0]
>>> a = np.arange(5)
>>> b = np.ones(5)
>>> luf(lambda i,j:i*i + j/2, a, b)
array([ 0.5, 1.5, 4.5, 9.5, 16.5])
If operand flags `"writeonly"` or `"readwrite"` are used the operands may
be views into the original data with the `WRITEBACKIFCOPY` flag. In this case
nditer must be used as a context manager or the nditer.close
method must be called before using the result. The temporary
data will be written back to the original data when the `__exit__`
function is called but not before:
>>> a = np.arange(6, dtype='i4')[::-2]
>>> with nditer(a, [],
... [['writeonly', 'updateifcopy']],
... casting='unsafe',
... op_dtypes=[np.dtype('f4')]) as i:
... x = i.operands[0]
... x[:] = [-1, -2, -3]
... # a still unchanged here
>>> a, x
array([-1, -2, -3]), array([-1, -2, -3])
It is important to note that once the iterator is exited, dangling
references (like `x` in the example) may or may not share data with
the original data `a`. If writeback semantics were active, i.e. if
`x.base.flags.writebackifcopy` is `True`, then exiting the iterator
will sever the connection between `x` and `a`, writing to `x` will
no longer write to `a`. If writeback semantics are not active, then
`x.data` will still point at some part of `a.data`, and writing to
one will affect the other.
""")
# nditer methods
add_newdoc('numpy.core', 'nditer', ('copy',
"""
copy()
Get a copy of the iterator in its current state.
Examples
--------
>>> x = np.arange(10)
>>> y = x + 1
>>> it = np.nditer([x, y])
>>> it.next()
(array(0), array(1))
>>> it2 = it.copy()
>>> it2.next()
(array(1), array(2))
"""))
add_newdoc('numpy.core', 'nditer', ('operands',
"""
operands[`Slice`]
The array(s) to be iterated over. Valid only before the iterator is closed.
"""))
add_newdoc('numpy.core', 'nditer', ('debug_print',
"""
debug_print()
Print the current state of the `nditer` instance and debug info to stdout.
"""))
add_newdoc('numpy.core', 'nditer', ('enable_external_loop',
"""
enable_external_loop()
When the "external_loop" was not used during construction, but
is desired, this modifies the iterator to behave as if the flag
was specified.
"""))
add_newdoc('numpy.core', 'nditer', ('iternext',
"""
iternext()
Check whether iterations are left, and perform a single internal iteration
    without returning the result. Used in the C-style do-while
pattern. For an example, see `nditer`.
Returns
-------
iternext : bool
Whether or not there are iterations left.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_axis',
"""
remove_axis(i)
Removes axis `i` from the iterator. Requires that the flag "multi_index"
be enabled.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_multi_index',
"""
remove_multi_index()
When the "multi_index" flag was specified, this removes it, allowing
the internal iteration structure to be optimized further.
"""))
add_newdoc('numpy.core', 'nditer', ('reset',
"""
reset()
Reset the iterator to its initial state.
"""))
add_newdoc('numpy.core', 'nested_iters',
"""
Create nditers for use in nested loops
Create a tuple of `nditer` objects which iterate in nested loops over
different axes of the op argument. The first iterator is used in the
outermost loop, the last in the innermost loop. Advancing one will change
the subsequent iterators to point at its new element.
Parameters
----------
op : ndarray or sequence of array_like
The array(s) to iterate over.
axes : list of list of int
Each item is used as an "op_axes" argument to an nditer
flags, op_flags, op_dtypes, order, casting, buffersize (optional)
See `nditer` parameters of the same name
Returns
-------
iters : tuple of nditer
An nditer for each item in `axes`, outermost first
See Also
--------
nditer
Examples
--------
Basic usage. Note how y is the "flattened" version of
[a[:, 0, :], a[:, 1, 0], a[:, 2, :]] since we specified
the first iter's axes as [1]
>>> a = np.arange(12).reshape(2, 3, 2)
>>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"])
>>> for x in i:
... print(i.multi_index)
... for y in j:
... print('', j.multi_index, y)
(0,)
(0, 0) 0
(0, 1) 1
(1, 0) 6
(1, 1) 7
(1,)
(0, 0) 2
(0, 1) 3
(1, 0) 8
(1, 1) 9
(2,)
(0, 0) 4
(0, 1) 5
(1, 0) 10
(1, 1) 11
""")
add_newdoc('numpy.core', 'nditer', ('close',
"""
close()
Resolve all writeback semantics in writeable operands.
See Also
--------
:ref:`nditer-context-manager`
"""))
###############################################################################
#
# broadcast
#
###############################################################################
add_newdoc('numpy.core', 'broadcast',
"""
Produce an object that mimics broadcasting.
Parameters
----------
in1, in2, ... : array_like
Input parameters.
Returns
-------
b : broadcast object
Broadcast the input parameters against one another, and
return an object that encapsulates the result.
Amongst others, it has ``shape`` and ``nd`` properties, and
may be used as an iterator.
See Also
--------
broadcast_arrays
broadcast_to
Examples
--------
Manually adding two vectors, using broadcasting:
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> out = np.empty(b.shape)
>>> out.flat = [u+v for (u,v) in b]
>>> out
array([[ 5., 6., 7.],
[ 6., 7., 8.],
[ 7., 8., 9.]])
Compare against built-in broadcasting:
>>> x + y
array([[5, 6, 7],
[6, 7, 8],
[7, 8, 9]])
""")
# attributes
add_newdoc('numpy.core', 'broadcast', ('index',
"""
current index in broadcasted result
Examples
--------
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> b.next(), b.next(), b.next()
((1, 4), (1, 5), (1, 6))
>>> b.index
3
"""))
add_newdoc('numpy.core', 'broadcast', ('iters',
"""
tuple of iterators along ``self``'s "components."
Returns a tuple of `numpy.flatiter` objects, one for each "component"
of ``self``.
See Also
--------
numpy.flatiter
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> row, col = b.iters
>>> row.next(), col.next()
(1, 4)
"""))
add_newdoc('numpy.core', 'broadcast', ('ndim',
"""
Number of dimensions of broadcasted result. Alias for `nd`.
.. versionadded:: 1.12.0
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.ndim
2
"""))
add_newdoc('numpy.core', 'broadcast', ('nd',
"""
Number of dimensions of broadcasted result. For code intended for NumPy
1.12.0 and later the more consistent `ndim` is preferred.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.nd
2
"""))
add_newdoc('numpy.core', 'broadcast', ('numiter',
"""
Number of iterators possessed by the broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.numiter
2
"""))
add_newdoc('numpy.core', 'broadcast', ('shape',
"""
Shape of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.shape
(3, 3)
"""))
add_newdoc('numpy.core', 'broadcast', ('size',
"""
Total size of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.size
9
"""))
add_newdoc('numpy.core', 'broadcast', ('reset',
"""
reset()
Reset the broadcasted result's iterator(s).
Parameters
----------
None
Returns
-------
None
Examples
--------
>>> x = np.array([1, 2, 3])
    >>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> b.next(), b.next(), b.next()
((1, 4), (2, 4), (3, 4))
>>> b.index
3
>>> b.reset()
>>> b.index
0
"""))
###############################################################################
#
# numpy functions
#
###############################################################################
add_newdoc('numpy.core.multiarray', 'array',
"""
array(object, dtype=None, copy=True, order='K', subok=False, ndmin=0)
Create an array.
Parameters
----------
object : array_like
An array, any object exposing the array interface, an object whose
__array__ method returns an array, or any (nested) sequence.
dtype : data-type, optional
The desired data-type for the array. If not given, then the type will
be determined as the minimum type required to hold the objects in the
sequence. This argument can only be used to 'upcast' the array. For
downcasting, use the .astype(t) method.
copy : bool, optional
If true (default), then the object is copied. Otherwise, a copy will
only be made if __array__ returns a copy, if obj is a nested sequence,
or if a copy is needed to satisfy any of the other requirements
(`dtype`, `order`, etc.).
order : {'K', 'A', 'C', 'F'}, optional
Specify the memory layout of the array. If object is not an array, the
newly created array will be in C order (row major) unless 'F' is
specified, in which case it will be in Fortran order (column major).
If object is an array the following holds.
===== ========= ===================================================
order no copy copy=True
===== ========= ===================================================
'K' unchanged F & C order preserved, otherwise most similar order
'A' unchanged F order if input is F and not C, otherwise C order
'C' C order C order
'F' F order F order
===== ========= ===================================================
When ``copy=False`` and a copy is made for other reasons, the result is
the same as if ``copy=True``, with some exceptions for `A`, see the
Notes section. The default order is 'K'.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting
array should have. Ones will be pre-pended to the shape as
needed to meet this requirement.
Returns
-------
out : ndarray
An array object satisfying the specified requirements.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
full : Return a new array of given shape filled with value.
Notes
-----
When order is 'A' and `object` is an array in neither 'C' nor 'F' order,
and a copy is forced by a change in dtype, then the order of the result is
not necessarily 'C' as expected. This is likely a bug.
Examples
--------
>>> np.array([1, 2, 3])
array([1, 2, 3])
Upcasting:
>>> np.array([1, 2, 3.0])
array([ 1., 2., 3.])
More than one dimension:
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
[3, 4]])
Minimum dimensions 2:
>>> np.array([1, 2, 3], ndmin=2)
array([[1, 2, 3]])
Type provided:
>>> np.array([1, 2, 3], dtype=complex)
array([ 1.+0.j, 2.+0.j, 3.+0.j])
Data-type consisting of more than one element:
>>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')])
>>> x['a']
array([1, 3])
Creating an array from sub-classes:
>>> np.array(np.mat('1 2; 3 4'))
array([[1, 2],
[3, 4]])
>>> np.array(np.mat('1 2; 3 4'), subok=True)
matrix([[1, 2],
[3, 4]])
""")
add_newdoc('numpy.core.multiarray', 'empty',
"""
empty(shape, dtype=float, order='C')
Return a new array of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
Shape of the empty array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
Desired output data-type for the array, e.g, `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional, default: 'C'
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data of the given shape, dtype, and
order. Object arrays will be initialized to None.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
full : Return a new array of given shape filled with value.
Notes
-----
`empty`, unlike `zeros`, does not set the array values to zero,
and may therefore be marginally faster. On the other hand, it requires
the user to manually set all the values in the array, and should be
used with caution.
Examples
--------
>>> np.empty([2, 2])
array([[ -9.74499359e+001, 6.69583040e-309],
[ 2.13182611e-314, 3.06959433e-309]]) #random
>>> np.empty([2, 2], dtype=int)
array([[-1073741821, -1067949133],
[ 496041986, 19249760]]) #random
""")
add_newdoc('numpy.core.multiarray', 'scalar',
"""
scalar(dtype, obj)
Return a new scalar array of the given type initialized with obj.
This function is meant mainly for pickle support. `dtype` must be a
valid data-type descriptor. If `dtype` corresponds to an object
descriptor, then `obj` can be any object, otherwise `obj` must be a
string. If `obj` is not given, it will be interpreted as None for object
type and as zeros for all other types.
""")
add_newdoc('numpy.core.multiarray', 'zeros',
"""
zeros(shape, dtype=float, order='C')
Return a new array of given shape and type, filled with zeros.
Parameters
----------
shape : int or tuple of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional, default: 'C'
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
Returns
-------
out : ndarray
Array of zeros with the given shape, dtype, and order.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
full : Return a new array of given shape filled with value.
Examples
--------
>>> np.zeros(5)
array([ 0., 0., 0., 0., 0.])
>>> np.zeros((5,), dtype=int)
array([0, 0, 0, 0, 0])
>>> np.zeros((2, 1))
array([[ 0.],
[ 0.]])
>>> s = (2,2)
>>> np.zeros(s)
array([[ 0., 0.],
[ 0., 0.]])
>>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype
array([(0, 0), (0, 0)],
dtype=[('x', '<i4'), ('y', '<i4')])
""")
add_newdoc('numpy.core.multiarray', 'set_typeDict',
"""set_typeDict(dict)
Set the internal dictionary that can look up an array type using a
registered code.
""")
add_newdoc('numpy.core.multiarray', 'fromstring',
"""
fromstring(string, dtype=float, count=-1, sep='')
A new 1-D array initialized from text data in a string.
Parameters
----------
string : str
A string containing the data.
dtype : data-type, optional
The data type of the array; default: float. For binary input data,
the data must be in exactly this format.
count : int, optional
Read this number of `dtype` elements from the data. If this is
negative (the default), the count will be determined from the
length of the data.
sep : str, optional
The string separating numbers in the data; extra whitespace between
elements is also ignored.
.. deprecated:: 1.14
If this argument is not provided, `fromstring` falls back on the
behaviour of `frombuffer` after encoding unicode string inputs as
either utf-8 (python 3), or the default encoding (python 2).
Returns
-------
arr : ndarray
The constructed array.
Raises
------
ValueError
If the string is not the correct size to satisfy the requested
`dtype` and `count`.
See Also
--------
frombuffer, fromfile, fromiter
Examples
--------
>>> np.fromstring('1 2', dtype=int, sep=' ')
array([1, 2])
>>> np.fromstring('1, 2', dtype=int, sep=',')
array([1, 2])
""")
add_newdoc('numpy.core.multiarray', 'fromiter',
"""
fromiter(iterable, dtype, count=-1)
Create a new 1-dimensional array from an iterable object.
Parameters
----------
iterable : iterable object
An iterable object providing data for the array.
dtype : data-type
The data-type of the returned array.
count : int, optional
The number of items to read from *iterable*. The default is -1,
which means all data is read.
Returns
-------
out : ndarray
The output array.
Notes
-----
Specify `count` to improve performance. It allows ``fromiter`` to
pre-allocate the output array, instead of resizing it on demand.
Examples
--------
>>> iterable = (x*x for x in range(5))
>>> np.fromiter(iterable, float)
array([ 0., 1., 4., 9., 16.])
""")
add_newdoc('numpy.core.multiarray', 'fromfile',
"""
fromfile(file, dtype=float, count=-1, sep='')
Construct an array from data in a text or binary file.
A highly efficient way of reading binary data with a known data-type,
as well as parsing simply formatted text files. Data written using the
`tofile` method can be read using this function.
Parameters
----------
file : file or str
Open file object or filename.
dtype : data-type
Data type of the returned array.
For binary files, it is used to determine the size and byte-order
of the items in the file.
count : int
Number of items to read. ``-1`` means all items (i.e., the complete
file).
sep : str
Separator between items if file is a text file.
Empty ("") separator means the file should be treated as binary.
Spaces (" ") in the separator match zero or more whitespace characters.
A separator consisting only of spaces must match at least one
whitespace.
See also
--------
load, save
ndarray.tofile
loadtxt : More flexible way of loading data from a text file.
Notes
-----
Do not rely on the combination of `tofile` and `fromfile` for
    data storage, as the binary files generated are not platform
independent. In particular, no byte-order or data-type information is
saved. Data can be stored in the platform independent ``.npy`` format
using `save` and `load` instead.
Examples
--------
Construct an ndarray:
>>> dt = np.dtype([('time', [('min', int), ('sec', int)]),
... ('temp', float)])
>>> x = np.zeros((1,), dtype=dt)
>>> x['time']['min'] = 10; x['temp'] = 98.25
>>> x
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
Save the raw data to disk:
>>> import os
>>> fname = os.tmpnam()
>>> x.tofile(fname)
Read the raw data from disk:
>>> np.fromfile(fname, dtype=dt)
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
The recommended way to store and load data:
>>> np.save(fname, x)
>>> np.load(fname + '.npy')
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
""")
add_newdoc('numpy.core.multiarray', 'frombuffer',
"""
frombuffer(buffer, dtype=float, count=-1, offset=0)
Interpret a buffer as a 1-dimensional array.
Parameters
----------
buffer : buffer_like
An object that exposes the buffer interface.
dtype : data-type, optional
Data-type of the returned array; default: float.
count : int, optional
Number of items to read. ``-1`` means all data in the buffer.
offset : int, optional
Start reading the buffer from this offset (in bytes); default: 0.
Notes
-----
If the buffer has data that is not in machine byte-order, this should
be specified as part of the data-type, e.g.::
>>> dt = np.dtype(int)
>>> dt = dt.newbyteorder('>')
>>> np.frombuffer(buf, dtype=dt)
The data of the resulting array will not be byteswapped, but will be
interpreted correctly.
Examples
--------
>>> s = 'hello world'
>>> np.frombuffer(s, dtype='S1', count=5, offset=6)
array(['w', 'o', 'r', 'l', 'd'],
dtype='|S1')
>>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8)
array([1, 2], dtype=uint8)
>>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
array([1, 2, 3], dtype=uint8)
""")
add_newdoc('numpy.core', 'fastCopyAndTranspose',
"""_fastCopyAndTranspose(a)""")
add_newdoc('numpy.core.multiarray', 'correlate',
"""cross_correlate(a,v, mode=0)""")
add_newdoc('numpy.core.multiarray', 'arange',
"""
arange([start,] stop[, step,], dtype=None)
Return evenly spaced values within a given interval.
Values are generated within the half-open interval ``[start, stop)``
(in other words, the interval including `start` but excluding `stop`).
For integer arguments the function is equivalent to the Python built-in
`range` function, but returns an ndarray rather than a list.
When using a non-integer step, such as 0.1, the results will often not
be consistent. It is better to use `numpy.linspace` for these cases.
Parameters
----------
start : number, optional
Start of interval. The interval includes this value. The default
start value is 0.
stop : number
End of interval. The interval does not include this value, except
in some cases where `step` is not an integer and floating point
round-off affects the length of `out`.
step : number, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
step size is 1. If `step` is specified as a positional argument,
`start` must also be given.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
arange : ndarray
Array of evenly spaced values.
For floating point arguments, the length of the result is
``ceil((stop - start)/step)``. Because of floating point overflow,
this rule may result in the last element of `out` being greater
than `stop`.
See Also
--------
linspace : Evenly spaced numbers with careful handling of endpoints.
ogrid: Arrays of evenly spaced numbers in N-dimensions.
mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions.
Examples
--------
>>> np.arange(3)
array([0, 1, 2])
>>> np.arange(3.0)
array([ 0., 1., 2.])
>>> np.arange(3,7)
array([3, 4, 5, 6])
>>> np.arange(3,7,2)
array([3, 5])
""")
add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version',
"""_get_ndarray_c_version()
Return the compile time NDARRAY_VERSION number.
""")
add_newdoc('numpy.core.multiarray', '_reconstruct',
"""_reconstruct(subtype, shape, dtype)
Construct an empty array. Used by pickle.
""")
add_newdoc('numpy.core.multiarray', 'set_string_function',
"""
set_string_function(f, repr=1)
Internal method to set a function to be used when pretty printing arrays.
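Examples
--------
A brief sketch of the behaviour as exposed through the public
``np.set_string_function`` wrapper (the custom string shown is, of course,
only illustrative):
>>> def pprint(arr):
...     return 'HA! - What are you going to do now?'
...
>>> np.set_string_function(pprint)
>>> np.arange(10)
HA! - What are you going to do now?
>>> np.set_string_function(None) # restore the default behaviour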
""")
add_newdoc('numpy.core.multiarray', 'set_numeric_ops',
"""
set_numeric_ops(op1=func1, op2=func2, ...)
Set numerical operators for array objects.
Parameters
----------
op1, op2, ... : callable
Each ``op = func`` pair describes an operator to be replaced.
For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace
addition by modulus 5 addition.
Returns
-------
saved_ops : list of callables
A list of all operators, stored before making replacements.
Notes
-----
.. WARNING::
Use with care! Incorrect usage may lead to memory errors.
A function replacing an operator cannot make use of that operator.
For example, when replacing add, you may not use ``+``. Instead,
directly call ufuncs.
Examples
--------
>>> def add_mod5(x, y):
... return np.add(x, y) % 5
...
>>> old_funcs = np.set_numeric_ops(add=add_mod5)
>>> x = np.arange(12).reshape((3, 4))
>>> x + x
array([[0, 2, 4, 1],
[3, 0, 2, 4],
[1, 3, 0, 2]])
>>> ignore = np.set_numeric_ops(**old_funcs) # restore operators
""")
add_newdoc('numpy.core.multiarray', 'promote_types',
"""
promote_types(type1, type2)
Returns the data type with the smallest size and smallest scalar
kind to which both ``type1`` and ``type2`` may be safely cast.
The returned data type is always in native byte order.
This function is symmetric, but rarely associative.
Parameters
----------
type1 : dtype or dtype specifier
First data type.
type2 : dtype or dtype specifier
Second data type.
Returns
-------
out : dtype
The promoted data type.
Notes
-----
.. versionadded:: 1.6.0
Starting in NumPy 1.9, the promote_types function returns a valid string
length when given an integer or float dtype as one argument and a string
dtype as another argument. Previously it always returned the input string
dtype, even if it wasn't long enough to store the max integer/float value
converted to a string.
See Also
--------
result_type, dtype, can_cast
Examples
--------
>>> np.promote_types('f4', 'f8')
dtype('float64')
>>> np.promote_types('i8', 'f4')
dtype('float64')
>>> np.promote_types('>i8', '<c8')
dtype('complex128')
>>> np.promote_types('i4', 'S8')
dtype('S11')
An example of a non-associative case:
>>> p = np.promote_types
>>> p('S', p('i1', 'u1'))
dtype('S6')
>>> p(p('S', 'i1'), 'u1')
dtype('S4')
""")
add_newdoc('numpy.core.multiarray', 'newbuffer',
"""
newbuffer(size)
Return a new uninitialized buffer object.
Parameters
----------
size : int
Size in bytes of returned buffer object.
Returns
-------
newbuffer : buffer object
Returned, uninitialized buffer object of `size` bytes.
""")
add_newdoc('numpy.core.multiarray', 'getbuffer',
"""
getbuffer(obj [,offset[, size]])
Create a buffer object from the given object referencing a slice of
length size starting at offset.
Default is the entire buffer. A read-write buffer is attempted followed
by a read-only buffer.
Parameters
----------
obj : object
offset : int, optional
size : int, optional
Returns
-------
buffer_obj : buffer
Examples
--------
>>> buf = np.getbuffer(np.ones(5), 1, 3)
>>> len(buf)
3
>>> buf[0]
'\\x00'
>>> buf
<read-write buffer for 0x8af1e70, size 3, offset 1 at 0x8ba4ec0>
""")
add_newdoc('numpy.core', 'matmul',
"""
matmul(a, b, out=None)
Matrix product of two arrays.
The behavior depends on the arguments in the following way.
- If both arguments are 2-D they are multiplied like conventional
matrices.
- If either argument is N-D, N > 2, it is treated as a stack of
matrices residing in the last two indexes and broadcast accordingly.
- If the first argument is 1-D, it is promoted to a matrix by
prepending a 1 to its dimensions. After matrix multiplication
the prepended 1 is removed.
- If the second argument is 1-D, it is promoted to a matrix by
appending a 1 to its dimensions. After matrix multiplication
the appended 1 is removed.
Multiplication by a scalar is not allowed, use ``*`` instead. Note that
multiplying a stack of matrices with a vector will result in a stack of
vectors, but matmul will not recognize it as such.
``matmul`` differs from ``dot`` in two important ways.
- Multiplication by scalars is not allowed.
- Stacks of matrices are broadcast together as if the matrices
were elements.
.. warning::
This function is preliminary and included in NumPy 1.10.0 for testing
and documentation. Its semantics will not change, but the number and
order of the optional arguments will.
.. versionadded:: 1.10.0
Parameters
----------
a : array_like
First argument.
b : array_like
Second argument.
out : ndarray, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a,b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
Returns
-------
output : ndarray
Returns the dot product of `a` and `b`. If `a` and `b` are both
1-D arrays then a scalar is returned; otherwise an array is
returned. If `out` is given, then it is returned.
Raises
------
ValueError
If the last dimension of `a` is not the same size as
the second-to-last dimension of `b`.
If scalar value is passed.
See Also
--------
vdot : Complex-conjugating dot product.
tensordot : Sum products over arbitrary axes.
einsum : Einstein summation convention.
dot : alternative matrix product with different broadcasting rules.
Notes
-----
The matmul function implements the semantics of the `@` operator introduced
in Python 3.5 following PEP465.
Examples
--------
For 2-D arrays it is the matrix product:
>>> a = [[1, 0], [0, 1]]
>>> b = [[4, 1], [2, 2]]
>>> np.matmul(a, b)
array([[4, 1],
[2, 2]])
For 2-D mixed with 1-D, the result is the usual matrix-vector product:
>>> a = [[1, 0], [0, 1]]
>>> b = [1, 2]
>>> np.matmul(a, b)
array([1, 2])
>>> np.matmul(b, a)
array([1, 2])
Broadcasting is conventional for stacks of arrays
>>> a = np.arange(2*2*4).reshape((2,2,4))
>>> b = np.arange(2*2*4).reshape((2,4,2))
>>> np.matmul(a,b).shape
(2, 2, 2)
>>> np.matmul(a,b)[0,1,1]
98
>>> sum(a[0,1,:] * b[0,:,1])
98
Vector, vector returns the scalar inner product, but neither argument
is complex-conjugated:
>>> np.matmul([2j, 3j], [2j, 3j])
(-13+0j)
Scalar multiplication raises an error.
>>> np.matmul([1,2], 3)
Traceback (most recent call last):
...
ValueError: Scalar operands are not allowed, use '*' instead
""")
add_newdoc('numpy.core.multiarray', 'c_einsum',
"""
c_einsum(subscripts, *operands, out=None, dtype=None, order='K',
casting='safe')
*This documentation shadows that of the native python implementation of the `einsum` function,
except all references and examples related to the `optimize` argument (v 0.12.0) have been removed.*
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional,
linear algebraic array operations can be represented in a simple fashion.
In *implicit* mode `einsum` computes these values.
In *explicit* mode, `einsum` provides further flexibility to compute
other array operations that might not be considered classical Einstein
summation operations, by disabling, or forcing summation over specified
subscript labels.
See the notes and examples for clarification.
Parameters
----------
subscripts : str
Specifies the subscripts for summation as comma separated list of
subscript labels. An implicit (classical Einstein summation)
calculation is performed unless the explicit indicator '->' is
included as well as subscript labels of the precise output form.
operands : list of array_like
These are the arrays for the operation.
out : ndarray, optional
If provided, the calculation is done into this array.
dtype : {data-type, None}, optional
If provided, forces the calculation to use the data type specified.
Note that you may have to also give a more liberal `casting`
parameter to allow the conversions. Default is None.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
'K' means it should be as close to the layout of the inputs as
is possible, including arbitrarily permuted axes.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Setting this to
'unsafe' is not recommended, as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Default is 'safe'.
optimize : {False, True, 'greedy', 'optimal'}, optional
Controls if intermediate optimization should occur. No optimization
will occur if False and True will default to the 'greedy' algorithm.
Also accepts an explicit contraction list from the ``np.einsum_path``
function. See ``np.einsum_path`` for more details. Defaults to False.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
See Also
--------
einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
Notes
-----
.. versionadded:: 1.6.0
The Einstein summation convention can be used to compute
many multi-dimensional, linear algebraic array operations. `einsum`
provides a succinct way of representing these.
A non-exhaustive list of these operations,
which can be computed by `einsum`, is shown below along with examples:
* Trace of an array, :py:func:`numpy.trace`.
* Return a diagonal, :py:func:`numpy.diag`.
* Array axis summations, :py:func:`numpy.sum`.
* Transpositions and permutations, :py:func:`numpy.transpose`.
* Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`.
* Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`.
* Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
* Tensor contractions, :py:func:`numpy.tensordot`.
* Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
describes traditional matrix multiplication and is equivalent to
:py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
to :py:func:`np.trace(a) <numpy.trace>`.
In *implicit mode*, the chosen subscripts are important
since the axes of the output are reordered alphabetically. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose. Additionally,
``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
``np.einsum('ij,jh', a, b)`` returns the transpose of the
multiplication since subscript 'h' precedes subscript 'i'.
In *explicit mode* the output can be directly controlled by
specifying output subscript labels. This requires the
identifier '->' as well as the list of output subscript labels.
This feature increases the flexibility of the function since
summing can be disabled or forced when required. The call
``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
The difference is that `einsum` does not allow broadcasting by default.
Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
order of the output subscript labels and therefore returns matrix
multiplication, unlike the example above in implicit mode.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, one can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view (changed in version 1.10.0).
`einsum` also provides an alternative way to provide the subscripts
and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
If the output shape is not provided in this format `einsum` will be
calculated in implicit mode, otherwise it will be performed explicitly.
The examples below have corresponding `einsum` calls with the two
parameter methods.
.. versionadded:: 1.10.0
Views returned from einsum are now writeable whenever the input array
is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
of a 2D array.
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
Trace of a matrix:
>>> np.einsum('ii', a)
60
>>> np.einsum(a, [0,0])
60
>>> np.trace(a)
60
Extract the diagonal (requires explicit form):
>>> np.einsum('ii->i', a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum(a, [0,0], [0])
array([ 0, 6, 12, 18, 24])
>>> np.diag(a)
array([ 0, 6, 12, 18, 24])
Sum over an axis (requires explicit form):
>>> np.einsum('ij->i', a)
array([ 10, 35, 60, 85, 110])
>>> np.einsum(a, [0,1], [0])
array([ 10, 35, 60, 85, 110])
>>> np.sum(a, axis=1)
array([ 10, 35, 60, 85, 110])
For higher dimensional arrays summing a single axis can be done with ellipsis:
>>> np.einsum('...j->...', a)
array([ 10, 35, 60, 85, 110])
>>> np.einsum(a, [Ellipsis,1], [Ellipsis])
array([ 10, 35, 60, 85, 110])
Compute a matrix transpose, or reorder any number of axes:
>>> np.einsum('ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum('ij->ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum(c, [1,0])
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.transpose(c)
array([[0, 3],
[1, 4],
[2, 5]])
Vector inner products:
>>> np.einsum('i,i', b, b)
30
>>> np.einsum(b, [0], b, [0])
30
>>> np.inner(b,b)
30
Matrix vector multiplication:
>>> np.einsum('ij,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum(a, [0,1], b, [1])
array([ 30, 80, 130, 180, 230])
>>> np.dot(a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('...j,j', a, b)
array([ 30, 80, 130, 180, 230])
Broadcasting and scalar multiplication:
>>> np.einsum('..., ...', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(',ij', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.multiply(3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
Vector outer product:
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum(np.arange(2)+1, [0], b, [1])
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.outer(np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
Tensor contraction:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
Writeable returned arrays (since version 1.10.0):
>>> a = np.zeros((3, 3))
>>> np.einsum('ii->i', a)[:] = 1
>>> a
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Example of ellipsis use:
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('k...,jk', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
""")
##############################################################################
#
# Documentation for ndarray attributes and methods
#
##############################################################################
##############################################################################
#
# ndarray object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray',
"""
ndarray(shape, dtype=float, buffer=None, offset=0,
strides=None, order=None)
An array object represents a multidimensional, homogeneous array
of fixed-size items. An associated data-type object describes the
format of each element in the array (its byte-order, how many bytes it
occupies in memory, whether it is an integer, a floating point number,
or something else, etc.)
Arrays should be constructed using `array`, `zeros` or `empty` (refer
to the See Also section below). The parameters given here refer to
a low-level method (`ndarray(...)`) for instantiating an array.
For more information, refer to the `numpy` module and examine the
methods and attributes of an array.
Parameters
----------
(for the __new__ method; see Notes below)
shape : tuple of ints
Shape of created array.
dtype : data-type, optional
Any object that can be interpreted as a numpy data type.
buffer : object exposing buffer interface, optional
Used to fill the array with data.
offset : int, optional
Offset of array data in buffer.
strides : tuple of ints, optional
Strides of data in memory.
order : {'C', 'F'}, optional
Row-major (C-style) or column-major (Fortran-style) order.
Attributes
----------
T : ndarray
Transpose of the array.
data : buffer
The array's elements, in memory.
dtype : dtype object
Describes the format of the elements in the array.
flags : dict
Dictionary containing information related to memory use, e.g.,
'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
flat : numpy.flatiter object
Flattened version of the array as an iterator. The iterator
allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for
assignment examples; TODO).
imag : ndarray
Imaginary part of the array.
real : ndarray
Real part of the array.
size : int
Number of elements in the array.
itemsize : int
The memory use of each array element in bytes.
nbytes : int
The total number of bytes required to store the array data,
i.e., ``itemsize * size``.
ndim : int
The array's number of dimensions.
shape : tuple of ints
Shape of the array.
strides : tuple of ints
The step-size required to move from one element to the next in
memory. For example, a contiguous ``(3, 4)`` array of type
``int16`` in C-order has strides ``(8, 2)``. This implies that
to move from element to element in memory requires jumps of 2 bytes.
To move from row-to-row, one needs to jump 8 bytes at a time
(``2 * 4``).
ctypes : ctypes object
Class containing properties of the array needed for interaction
with ctypes.
base : ndarray
If the array is a view into another array, that array is its `base`
(unless that array is also a view). The `base` array is where the
array data is actually stored.
See Also
--------
array : Construct an array.
zeros : Create an array, each element of which is zero.
empty : Create an array, but leave its allocated memory unchanged (i.e.,
it contains "garbage").
dtype : Create a data-type.
Notes
-----
There are two modes of creating an array using ``__new__``:
1. If `buffer` is None, then only `shape`, `dtype`, and `order`
are used.
2. If `buffer` is an object exposing the buffer interface, then
all keywords are interpreted.
No ``__init__`` method is needed because the array is fully initialized
after the ``__new__`` method.
Examples
--------
These examples illustrate the low-level `ndarray` constructor. Refer
to the `See Also` section above for easier ways of constructing an
ndarray.
First mode, `buffer` is None:
>>> np.ndarray(shape=(2,2), dtype=float, order='F')
array([[ -1.13698227e+002, 4.25087011e-303],
[ 2.88528414e-306, 3.27025015e-309]]) #random
Second mode:
>>> np.ndarray((2,), buffer=np.array([1,2,3]),
... offset=np.int_().itemsize,
... dtype=int) # offset = 1*itemsize, i.e. skip first element
array([2, 3])
""")
##############################################################################
#
# ndarray attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__',
"""Array protocol: Python side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__',
"""None."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__',
"""Array priority."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
"""Array protocol: C-struct side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('_as_parameter_',
"""Allow the array to be interpreted as a ctypes object by returning the
data-memory location as an integer
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
"""
Base object if memory is from some other object.
Examples
--------
The base of an array that owns its memory is None:
>>> x = np.array([1,2,3,4])
>>> x.base is None
True
Slicing creates a view, whose memory is shared with x:
>>> y = x[2:]
>>> y.base is x
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
"""
An object to simplify the interaction of the array with the ctypes
module.
This attribute creates an object that makes it easier to use arrays
when calling shared libraries with the ctypes module. The returned
object has, among others, data, shape, and strides attributes (see
Notes below) which themselves return ctypes objects that can be used
as arguments to a shared library.
Parameters
----------
None
Returns
-------
c : Python object
Possessing attributes data, shape, strides, etc.
See Also
--------
numpy.ctypeslib
Notes
-----
Below are the public attributes of this object which were documented
in "Guide to NumPy" (we have omitted undocumented public attributes,
as well as documented private attributes):
.. autoattribute:: numpy.core._internal._ctypes.data
.. autoattribute:: numpy.core._internal._ctypes.shape
.. autoattribute:: numpy.core._internal._ctypes.strides
.. automethod:: numpy.core._internal._ctypes.data_as
.. automethod:: numpy.core._internal._ctypes.shape_as
.. automethod:: numpy.core._internal._ctypes.strides_as
Be careful using the ctypes attribute - especially on temporary
arrays or arrays constructed on the fly. For example, calling
``(a+b).ctypes.data_as(ctypes.c_void_p)`` returns a pointer to memory
that is invalid because the array created as (a+b) is deallocated
before the next Python statement. You can avoid this problem using
either ``c=a+b`` or ``ct=(a+b).ctypes``. In the latter case, ct will
hold a reference to the array until ct is deleted or re-assigned.
If the ctypes module is not available, then the ctypes attribute
of array objects still returns something useful, but ctypes objects
are not returned and errors may be raised instead. In particular,
the object will still have the ``_as_parameter_`` attribute, which will
return an integer equal to the data attribute.
Examples
--------
>>> import ctypes
>>> x
array([[0, 1],
[2, 3]])
>>> x.ctypes.data
30439712
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long))
<ctypes.LP_c_long object at 0x01F01300>
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)).contents
c_long(0)
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_longlong)).contents
c_longlong(4294967296L)
>>> x.ctypes.shape
<numpy.core._internal.c_long_Array_2 object at 0x01FFD580>
>>> x.ctypes.shape_as(ctypes.c_long)
<numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
>>> x.ctypes.strides
<numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
>>> x.ctypes.strides_as(ctypes.c_longlong)
<numpy.core._internal.c_longlong_Array_2 object at 0x01F01300>
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('data',
"""Python buffer object pointing to the start of the array's data."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
"""
Data-type of the array's elements.
Parameters
----------
None
Returns
-------
d : numpy dtype object
See Also
--------
numpy.dtype
Examples
--------
>>> x
array([[0, 1],
[2, 3]])
>>> x.dtype
dtype('int32')
>>> type(x.dtype)
<type 'numpy.dtype'>
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('imag',
"""
The imaginary part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.imag
array([ 0. , 0.70710678])
>>> x.imag.dtype
dtype('float64')
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize',
"""
Length of one array element in bytes.
Examples
--------
>>> x = np.array([1,2,3], dtype=np.float64)
>>> x.itemsize
8
>>> x = np.array([1,2,3], dtype=np.complex128)
>>> x.itemsize
16
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
"""
Information about the memory layout of the array.
Attributes
----------
C_CONTIGUOUS (C)
The data is in a single, C-style contiguous segment.
F_CONTIGUOUS (F)
The data is in a single, Fortran-style contiguous segment.
OWNDATA (O)
The array owns the memory it uses or borrows it from another object.
WRITEABLE (W)
The data area can be written to. Setting this to False locks
the data, making it read-only. A view (slice, etc.) inherits WRITEABLE
from its base array at creation time, but a view of a writeable
array may be subsequently locked while the base array remains writeable.
(The opposite is not true, in that a view of a locked array may not
be made writeable. However, currently, locking a base object does not
lock any views that already reference it, so under that circumstance it
is possible to alter the contents of a locked array via a previously
created writeable view onto it.) Attempting to change a non-writeable
array raises a RuntimeError exception.
ALIGNED (A)
The data and all elements are aligned appropriately for the hardware.
WRITEBACKIFCOPY (X)
This array is a copy of some other array. The C-API function
PyArray_ResolveWritebackIfCopy must be called before deallocating
this array, at which point the base array will be updated with the
contents of this array.
UPDATEIFCOPY (U)
(Deprecated, use WRITEBACKIFCOPY) This array is a copy of some other array.
When this array is
deallocated, the base array will be updated with the contents of
this array.
FNC
F_CONTIGUOUS and not C_CONTIGUOUS.
FORC
F_CONTIGUOUS or C_CONTIGUOUS (one-segment test).
BEHAVED (B)
ALIGNED and WRITEABLE.
CARRAY (CA)
BEHAVED and C_CONTIGUOUS.
FARRAY (FA)
BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS.
Notes
-----
The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``),
or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag
names are only supported in dictionary access.
Only the WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be
changed by the user, via direct assignment to the attribute or dictionary
entry, or by calling `ndarray.setflags`.
The array flags cannot be set arbitrarily:
- UPDATEIFCOPY can only be set ``False``.
- WRITEBACKIFCOPY can only be set ``False``.
- ALIGNED can only be set ``True`` if the data is truly aligned.
- WRITEABLE can only be set ``True`` if the array owns its own memory
or the ultimate owner of the memory exposes a writeable buffer
interface or is a string.
Arrays can be both C-style and Fortran-style contiguous simultaneously.
This is clear for 1-dimensional arrays, but can also be true for higher
dimensional arrays.
Even for contiguous arrays a stride for a given dimension
``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
or the array has no elements.
It does *not* generally hold that ``self.strides[-1] == self.itemsize``
for C-style contiguous arrays or ``self.strides[0] == self.itemsize`` for
Fortran-style contiguous arrays.
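Examples
--------
A short illustrative sketch (the values shown are for a freshly created,
C-ordered array):
>>> a = np.arange(4)
>>> a.flags['C_CONTIGUOUS']
True
>>> a.flags.writeable = False
>>> a.flags['WRITEABLE']
False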
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
"""
A 1-D iterator over the array.
This is a `numpy.flatiter` instance, which acts similarly to, but is not
a subclass of, Python's built-in iterator object.
See Also
--------
flatten : Return a copy of the array collapsed into one dimension.
flatiter
Examples
--------
>>> x = np.arange(1, 7).reshape(2, 3)
>>> x
array([[1, 2, 3],
[4, 5, 6]])
>>> x.flat[3]
4
>>> x.T
array([[1, 4],
[2, 5],
[3, 6]])
>>> x.T.flat[3]
5
>>> type(x.flat)
<type 'numpy.flatiter'>
An assignment example:
>>> x.flat = 3; x
array([[3, 3, 3],
[3, 3, 3]])
>>> x.flat[[1,4]] = 1; x
array([[3, 1, 3],
[3, 1, 3]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes',
"""
Total bytes consumed by the elements of the array.
Notes
-----
Does not include memory consumed by non-element attributes of the
array object.
Examples
--------
>>> x = np.zeros((3,5,2), dtype=np.complex128)
>>> x.nbytes
480
>>> np.prod(x.shape) * x.itemsize
480
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim',
"""
Number of array dimensions.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> x.ndim
1
>>> y = np.zeros((2, 3, 4))
>>> y.ndim
3
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('real',
"""
The real part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.real
array([ 1. , 0.70710678])
>>> x.real.dtype
dtype('float64')
See Also
--------
numpy.real : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
"""
Tuple of array dimensions.
The shape property is usually used to get the current shape of an array,
but may also be used to reshape the array in-place by assigning a tuple of
array dimensions to it. As with `numpy.reshape`, one of the new shape
dimensions can be -1, in which case its value is inferred from the size of
the array and the remaining dimensions. Reshaping an array in-place will
fail if a copy is required.
Examples
--------
>>> x = np.array([1, 2, 3, 4])
>>> x.shape
(4,)
>>> y = np.zeros((2, 3, 4))
>>> y.shape
(2, 3, 4)
>>> y.shape = (3, 8)
>>> y
array([[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.]])
>>> y.shape = (3, 6)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: total size of new array must be unchanged
>>> np.zeros((4,2))[::2].shape = (-1,)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: incompatible shape for a non-contiguous array
See Also
--------
numpy.reshape : similar function
ndarray.reshape : similar method
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
"""
Number of elements in the array.
Equal to ``np.prod(a.shape)``, i.e., the product of the array's
dimensions.
Notes
-----
`a.size` returns a standard arbitrary precision Python integer. This
may not be the case with other methods of obtaining the same value
(like the suggested ``np.prod(a.shape)``, which returns an instance
of ``np.int_``), and may be relevant if the value is used further in
calculations that may overflow a fixed size integer type.
Examples
--------
>>> x = np.zeros((3, 5, 2), dtype=np.complex128)
>>> x.size
30
>>> np.prod(x.shape)
30
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
"""
Tuple of bytes to step in each dimension when traversing an array.
The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a`
is::
offset = sum(np.array(i) * a.strides)
A more detailed explanation of strides can be found in the
"ndarray.rst" file in the NumPy reference guide.
Notes
-----
Imagine an array of 32-bit integers (each 4 bytes)::
x = np.array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]], dtype=np.int32)
This array is stored in memory as 40 bytes, one after the other
(known as a contiguous block of memory). The strides of an array tell
us how many bytes we have to skip in memory to move to the next position
along a certain axis. For example, we have to skip 4 bytes (1 value) to
move to the next column, but 20 bytes (5 values) to get to the same
position in the next row. As such, the strides for the array `x` will be
``(20, 4)``.
See Also
--------
numpy.lib.stride_tricks.as_strided
Examples
--------
>>> y = np.reshape(np.arange(2*3*4), (2,3,4))
>>> y
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
>>> y.strides
(48, 16, 4)
>>> y[1,1,1]
17
>>> offset=sum(y.strides * np.array((1,1,1)))
>>> offset/y.itemsize
17
>>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0)
>>> x.strides
(32, 4, 224, 1344)
>>> i = np.array([3,5,2,2])
>>> offset = sum(i * x.strides)
>>> x[3,5,2,2]
813
>>> offset / x.itemsize
813
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
"""
Same as self.transpose(), except that self is returned if
self.ndim < 2.
Examples
--------
>>> x = np.array([[1.,2.],[3.,4.]])
>>> x
array([[ 1., 2.],
[ 3., 4.]])
>>> x.T
array([[ 1., 3.],
[ 2., 4.]])
>>> x = np.array([1.,2.,3.,4.])
>>> x
array([ 1., 2., 3., 4.])
>>> x.T
array([ 1., 2., 3., 4.])
"""))
##############################################################################
#
# ndarray methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
""" a.__array__(|dtype) -> reference if type unchanged, copy otherwise.
Returns either a new reference to self if dtype is not given or a new array
of provided data type if dtype is different from the current dtype of the
array.
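Examples
--------
A small illustrative sketch (``np.float64`` is simply a dtype that differs
from the array's own, forcing a copy):
>>> a = np.arange(3)
>>> a.__array__() is a
True
>>> a.__array__(np.float64) is a
False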
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__',
"""a.__array_prepare__(obj) -> Object of same type as ndarray object obj.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__',
"""a.__array_wrap__(obj) -> Object of same type as ndarray object a.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__',
"""a.__copy__()
Used if :func:`copy.copy` is called on an array. Returns a copy of the array.
Equivalent to ``a.copy(order='K')``.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
"""a.__deepcopy__(memo, /) -> Deep copy of array.
Used if :func:`copy.deepcopy` is called on an array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__',
"""a.__reduce__()
For pickling.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__',
"""a.__setstate__(state, /)
For unpickling.
The `state` argument must be a sequence that contains the following
elements:
Parameters
----------
version : int
optional pickle version. If omitted defaults to 0.
shape : tuple
dtype : data-type
isFortran : bool
rawdata : string or list
a binary string with the data (or a list if 'a' is an object array)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('all',
"""
a.all(axis=None, out=None, keepdims=False)
Returns True if all elements evaluate to True.
Refer to `numpy.all` for full documentation.
See Also
--------
numpy.all : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('any',
"""
a.any(axis=None, out=None, keepdims=False)
Returns True if any of the elements of `a` evaluate to True.
Refer to `numpy.any` for full documentation.
See Also
--------
numpy.any : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax',
"""
a.argmax(axis=None, out=None)
Return indices of the maximum values along the given axis.
Refer to `numpy.argmax` for full documentation.
See Also
--------
numpy.argmax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
"""
a.argmin(axis=None, out=None)
Return indices of the minimum values along the given axis of `a`.
Refer to `numpy.argmin` for detailed documentation.
See Also
--------
numpy.argmin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort',
"""
a.argsort(axis=-1, kind='quicksort', order=None)
Returns the indices that would sort this array.
Refer to `numpy.argsort` for full documentation.
See Also
--------
numpy.argsort : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition',
"""
a.argpartition(kth, axis=-1, kind='introselect', order=None)
Returns the indices that would partition this array.
Refer to `numpy.argpartition` for full documentation.
.. versionadded:: 1.8.0
See Also
--------
numpy.argpartition : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
"""
a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True)
Copy of the array, cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result.
'C' means C order, 'F' means Fortran order, 'A'
means 'F' order if all the arrays are Fortran contiguous,
'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'unsafe'
for backwards compatibility.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
subok : bool, optional
If True, then sub-classes will be passed-through (default), otherwise
the returned array will be forced to be a base-class array.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to false, and the `dtype`, `order`, and `subok`
requirements are satisfied, the input array is returned instead
of a copy.
Returns
-------
arr_t : ndarray
Unless `copy` is False and the other conditions for returning the input
array are satisfied (see description for `copy` input parameter), `arr_t`
is a new array of the same shape as the input array, with dtype, order
given by `dtype`, `order`.
Notes
-----
Starting in NumPy 1.9, the astype method raises an error if the string
dtype to cast to is not long enough in 'safe' casting mode to hold the
maximum value of the integer/float array that is being cast. Previously the casting
was allowed even if the result was truncated.
Raises
------
ComplexWarning
When casting from complex to float or int. To avoid this,
one should use ``a.real.astype(t)``.
Examples
--------
>>> x = np.array([1, 2, 2.5])
>>> x
array([ 1. , 2. , 2.5])
>>> x.astype(int)
array([1, 2, 2])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
"""
a.byteswap(inplace=False)
Swap the bytes of the array elements
Toggle between little-endian and big-endian data representation by
returning a byteswapped array, optionally swapped in-place.
Parameters
----------
inplace : bool, optional
If ``True``, swap bytes in-place, default is ``False``.
Returns
-------
out : ndarray
The byteswapped array. If `inplace` is ``True``, this is
a view to self.
Examples
--------
>>> A = np.array([1, 256, 8755], dtype=np.int16)
>>> map(hex, A)
['0x1', '0x100', '0x2233']
>>> A.byteswap(inplace=True)
array([ 256, 1, 13090], dtype=int16)
>>> map(hex, A)
['0x100', '0x1', '0x3322']
Arrays of strings are not swapped
>>> A = np.array(['ceg', 'fac'])
>>> A.byteswap()
array(['ceg', 'fac'],
dtype='|S3')
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
"""
a.choose(choices, out=None, mode='raise')
Use an index array to construct a new array from a set of choices.
Refer to `numpy.choose` for full documentation.
See Also
--------
numpy.choose : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('clip',
"""
a.clip(min=None, max=None, out=None)
Return an array whose values are limited to ``[min, max]``.
One of max or min must be given.
Refer to `numpy.clip` for full documentation.
See Also
--------
numpy.clip : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('compress',
"""
a.compress(condition, axis=None, out=None)
Return selected slices of this array along given axis.
Refer to `numpy.compress` for full documentation.
See Also
--------
numpy.compress : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conj',
"""
a.conj()
Complex-conjugate all elements.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate',
"""
a.conjugate()
Return the complex conjugate, element-wise.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('copy',
"""
a.copy(order='C')
Return a copy of the array.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :func:`numpy.copy` are very
similar, but have different default values for their order=
arguments.)
See also
--------
numpy.copy
numpy.copyto
Examples
--------
>>> x = np.array([[1,2,3],[4,5,6]], order='F')
>>> y = x.copy()
>>> x.fill(0)
>>> x
array([[0, 0, 0],
[0, 0, 0]])
>>> y
array([[1, 2, 3],
[4, 5, 6]])
>>> y.flags['C_CONTIGUOUS']
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod',
"""
a.cumprod(axis=None, dtype=None, out=None)
Return the cumulative product of the elements along the given axis.
Refer to `numpy.cumprod` for full documentation.
See Also
--------
numpy.cumprod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum',
"""
a.cumsum(axis=None, dtype=None, out=None)
Return the cumulative sum of the elements along the given axis.
Refer to `numpy.cumsum` for full documentation.
See Also
--------
numpy.cumsum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal',
"""
a.diagonal(offset=0, axis1=0, axis2=1)
Return specified diagonals. In NumPy 1.9 the returned array is a
read-only view instead of a copy as in previous NumPy versions. In
a future version the read-only restriction will be removed.
Refer to :func:`numpy.diagonal` for full documentation.
See Also
--------
numpy.diagonal : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dot',
"""
a.dot(b, out=None)
Dot product of two arrays.
Refer to `numpy.dot` for full documentation.
See Also
--------
numpy.dot : equivalent function
Examples
--------
>>> a = np.eye(2)
>>> b = np.ones((2, 2)) * 2
>>> a.dot(b)
array([[ 2., 2.],
[ 2., 2.]])
This array method can be conveniently chained:
>>> a.dot(b).dot(b)
array([[ 8., 8.],
[ 8., 8.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dump',
"""a.dump(file)
Dump a pickle of the array to the specified file.
The array can be read back with pickle.load or numpy.load.
Parameters
----------
file : str
A string naming the dump file.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps',
"""
a.dumps()
Returns the pickle of the array as a string.
pickle.loads or numpy.loads will convert the string back to an array.
Parameters
----------
None
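Examples
--------
A minimal round-trip sketch (``pickle.loads`` accepts the string/bytes
returned by ``dumps``):
>>> import pickle
>>> a = np.array([1, 2, 3])
>>> pickle.loads(a.dumps())
array([1, 2, 3])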
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('fill',
"""
a.fill(value)
Fill the array with a scalar value.
Parameters
----------
value : scalar
All elements of `a` will be assigned this value.
Examples
--------
>>> a = np.array([1, 2])
>>> a.fill(0)
>>> a
array([0, 0])
>>> a = np.empty(2)
>>> a.fill(1)
>>> a
array([ 1., 1.])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten',
"""
a.flatten(order='C')
Return a copy of the array collapsed into one dimension.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
'C' means to flatten in row-major (C-style) order.
'F' means to flatten in column-major (Fortran-
style) order. 'A' means to flatten in column-major
order if `a` is Fortran *contiguous* in memory,
row-major order otherwise. 'K' means to flatten
`a` in the order the elements occur in memory.
The default is 'C'.
Returns
-------
y : ndarray
A copy of the input array, flattened to one dimension.
See Also
--------
ravel : Return a flattened array.
flat : A 1-D flat iterator over the array.
Examples
--------
>>> a = np.array([[1,2], [3,4]])
>>> a.flatten()
array([1, 2, 3, 4])
>>> a.flatten('F')
array([1, 3, 2, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield',
"""
a.getfield(dtype, offset=0)
Returns a field of the given array as a certain type.
A field is a view of the array data with a given data-type. The values in
the view are determined by the given type and the offset into the current
array in bytes. The offset needs to be such that the view dtype fits in the
array dtype; for example an array of dtype complex128 has 16-byte elements.
If taking a view with a 32-bit integer (4 bytes), the offset needs to be
between 0 and 12 bytes.
Parameters
----------
dtype : str or dtype
The data type of the view. The dtype size of the view can not be larger
than that of the array itself.
offset : int
Number of bytes to skip before beginning the element view.
Examples
--------
>>> x = np.diag([1.+1.j]*2)
>>> x[1, 1] = 2 + 4.j
>>> x
array([[ 1.+1.j, 0.+0.j],
[ 0.+0.j, 2.+4.j]])
>>> x.getfield(np.float64)
array([[ 1., 0.],
[ 0., 2.]])
By choosing an offset of 8 bytes we can select the complex part of the
array for our view:
>>> x.getfield(np.float64, offset=8)
array([[ 1., 0.],
[ 0., 4.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('item',
"""
a.item(*args)
Copy an element of an array to a standard Python scalar and return it.
Parameters
----------
\\*args : Arguments (variable number and type)
* none: in this case, the method only works for arrays
with one element (`a.size == 1`), and that element is
copied into a standard Python scalar object and returned.
* int_type: this argument is interpreted as a flat index into
the array, specifying which element to copy and return.
* tuple of int_types: functions as does a single int_type argument,
except that the argument is interpreted as an nd-index into the
array.
Returns
-------
z : Standard Python scalar object
A copy of the specified element of the array as a suitable
Python scalar
Notes
-----
When the data type of `a` is longdouble or clongdouble, item() returns
a scalar array object because there is no available Python scalar that
would not lose information. Void arrays return a buffer object for item(),
unless fields are defined, in which case a tuple is returned.
`item` is very similar to a[args], except, instead of an array scalar,
a standard Python scalar is returned. This can be useful for speeding up
access to elements of the array and doing arithmetic on elements of the
array using Python's optimized math.
Examples
--------
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[3, 1, 7],
[2, 8, 3],
[8, 5, 3]])
>>> x.item(3)
2
>>> x.item(7)
5
>>> x.item((0, 1))
1
>>> x.item((2, 2))
3
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset',
"""
a.itemset(*args)
Insert scalar into an array (scalar is cast to array's dtype, if possible)
There must be at least 1 argument, and the last argument is interpreted
as *item*. Then, ``a.itemset(*args)`` is equivalent to but faster
than ``a[args] = item``. The item should be a scalar value and `args`
must select a single item in the array `a`.
Parameters
----------
\\*args : Arguments
If one argument: a scalar, only used in case `a` is of size 1.
If two arguments: the last argument is the value to be set
and must be a scalar, the first argument specifies a single array
element location. It is either an int or a tuple.
Notes
-----
Compared to indexing syntax, `itemset` provides some speed increase
for placing a scalar into a particular location in an `ndarray`,
if you must do this. However, generally this is discouraged:
among other problems, it complicates the appearance of the code.
Also, when using `itemset` (and `item`) inside a loop, be sure
to assign the methods to a local variable to avoid the attribute
look-up at each loop iteration.
Examples
--------
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[3, 1, 7],
[2, 8, 3],
[8, 5, 3]])
>>> x.itemset(4, 0)
>>> x.itemset((2, 2), 9)
>>> x
array([[3, 1, 7],
[2, 0, 3],
[8, 5, 9]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
"""
a.max(axis=None, out=None, keepdims=False)
Return the maximum along a given axis.
Refer to `numpy.amax` for full documentation.
See Also
--------
numpy.amax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
"""
a.mean(axis=None, dtype=None, out=None, keepdims=False)
Returns the average of the array elements along given axis.
Refer to `numpy.mean` for full documentation.
See Also
--------
numpy.mean : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
"""
a.min(axis=None, out=None, keepdims=False)
Return the minimum along a given axis.
Refer to `numpy.amin` for full documentation.
See Also
--------
numpy.amin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'shares_memory',
"""
shares_memory(a, b, max_work=None)
Determine if two arrays share memory
Parameters
----------
a, b : ndarray
Input arrays
max_work : int, optional
Effort to spend on solving the overlap problem (maximum number
of candidate solutions to consider). The following special
values are recognized:
max_work=MAY_SHARE_EXACT (default)
The problem is solved exactly. In this case, the function returns
True only if there is an element shared between the arrays.
max_work=MAY_SHARE_BOUNDS
Only the memory bounds of a and b are checked.
Raises
------
numpy.TooHardError
Exceeded max_work.
Returns
-------
out : bool
See Also
--------
may_share_memory
Examples
--------
>>> np.shares_memory(np.array([1,2]), np.array([5,8,9]))
False
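A further illustrative sketch: an overlapping view shares memory with its
parent, while two disjoint views of the same buffer do not:
>>> x = np.zeros(8)
>>> np.shares_memory(x, x[::2])
True
>>> np.shares_memory(x[::2], x[1::2])
False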
""")
add_newdoc('numpy.core.multiarray', 'may_share_memory',
"""
may_share_memory(a, b, max_work=None)
Determine if two arrays might share memory
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Only the memory bounds of a and b are checked by default.
Parameters
----------
a, b : ndarray
Input arrays
max_work : int, optional
Effort to spend on solving the overlap problem. See
`shares_memory` for details. Default for ``may_share_memory``
is to do a bounds check.
Returns
-------
out : bool
See Also
--------
shares_memory
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
>>> x = np.zeros([3, 4])
>>> np.may_share_memory(x[:,0], x[:,1])
True
""")
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
"""
arr.newbyteorder(new_order='S')
Return the array with the same data viewed with a different byte order.
Equivalent to::
arr.view(arr.dtype.newbyteorder(new_order))
Changes are also made in all fields and sub-arrays of the array data
type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
below. `new_order` codes can be any of:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
The default value ('S') results in swapping the current
byte order. The code does a case-insensitive check on the first
letter of `new_order` for the alternatives above. For example,
any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
Returns
-------
new_arr : array
New array object with the dtype reflecting given change to the
byte order.
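Examples
--------
An illustrative sketch; the underlying bytes are unchanged, only their
interpretation differs (output formatting may vary between NumPy versions):
>>> A = np.array([1, 256], dtype=np.int16)
>>> A.newbyteorder()
array([256, 1], dtype=int16)
>>> np.array_equal(A, A.newbyteorder().newbyteorder())
True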
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero',
"""
a.nonzero()
Return the indices of the elements that are non-zero.
Refer to `numpy.nonzero` for full documentation.
See Also
--------
numpy.nonzero : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('prod',
"""
a.prod(axis=None, dtype=None, out=None, keepdims=False)
Return the product of the array elements over the given axis
Refer to `numpy.prod` for full documentation.
See Also
--------
numpy.prod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp',
"""
a.ptp(axis=None, out=None, keepdims=False)
Peak to peak (maximum - minimum) value along a given axis.
Refer to `numpy.ptp` for full documentation.
See Also
--------
numpy.ptp : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('put',
"""
a.put(indices, values, mode='raise')
Set ``a.flat[n] = values[n]`` for all `n` in indices.
Refer to `numpy.put` for full documentation.
See Also
--------
numpy.put : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'copyto',
"""
copyto(dst, src, casting='same_kind', where=True)
Copies values from one array to another, broadcasting as necessary.
Raises a TypeError if the `casting` rule is violated, and if
`where` is provided, it selects which elements to copy.
.. versionadded:: 1.7.0
Parameters
----------
dst : ndarray
The array into which values are copied.
src : array_like
The array from which values are copied.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when copying.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
where : array_like of bool, optional
A boolean array which is broadcasted to match the dimensions
of `dst`, and selects elements to copy from `src` to `dst`
wherever it contains the value True.
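Examples
--------
A minimal sketch of `where`-controlled copying (output spacing is
illustrative):
>>> dst = np.zeros(3)
>>> np.copyto(dst, [1, 2, 3], where=[True, False, True])
>>> dst
array([ 1., 0., 3.])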
""")
add_newdoc('numpy.core.multiarray', 'putmask',
"""
putmask(a, mask, values)
Changes elements of an array based on conditional and input values.
Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
If `values` is not the same size as `a` and `mask` then it will repeat.
This gives behavior different from ``a[mask] = values``.
Parameters
----------
a : array_like
Target array.
mask : array_like
Boolean mask array. It has to be the same shape as `a`.
values : array_like
Values to put into `a` where `mask` is True. If `values` is smaller
than `a` it will be repeated.
See Also
--------
place, put, take, copyto
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> np.putmask(x, x>2, x**2)
>>> x
array([[ 0, 1, 2],
[ 9, 16, 25]])
If `values` is smaller than `a` it is repeated:
>>> x = np.arange(5)
>>> np.putmask(x, x>1, [-33, -44])
>>> x
array([ 0, 1, -33, -44, -33])
""")
add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel',
"""
a.ravel([order])
Return a flattened array.
Refer to `numpy.ravel` for full documentation.
See Also
--------
numpy.ravel : equivalent function
ndarray.flat : a flat iterator on the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat',
"""
a.repeat(repeats, axis=None)
Repeat elements of an array.
Refer to `numpy.repeat` for full documentation.
See Also
--------
numpy.repeat : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape',
"""
a.reshape(shape, order='C')
Returns an array containing the same data with a new shape.
Refer to `numpy.reshape` for full documentation.
See Also
--------
numpy.reshape : equivalent function
Notes
-----
Unlike the free function `numpy.reshape`, this method on `ndarray` allows
the elements of the shape parameter to be passed in as separate arguments.
For example, ``a.reshape(10, 11)`` is equivalent to
``a.reshape((10, 11))``.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('resize',
"""
a.resize(new_shape, refcheck=True)
Change shape and size of array in-place.
Parameters
----------
new_shape : tuple of ints, or `n` ints
Shape of resized array.
refcheck : bool, optional
If False, reference count will not be checked. Default is True.
Returns
-------
None
Raises
------
ValueError
If `a` does not own its own data or references or views to it exist,
and the data memory must be changed.
PyPy only: will always raise if the data memory must be changed, since
there is no reliable way to determine if references or views to it
exist.
SystemError
If the `order` keyword argument is specified. This behaviour is a
bug in NumPy.
See Also
--------
resize : Return a new array with the specified shape.
Notes
-----
This reallocates space for the data area if necessary.
Only contiguous arrays (data elements consecutive in memory) can be
resized.
The purpose of the reference count check is to make sure you
do not use this array as a buffer for another Python object and then
reallocate the memory. However, reference counts can increase in
other ways so if you are sure that you have not shared the memory
for this array with another Python object, then you may safely set
`refcheck` to False.
Examples
--------
Shrinking an array: array is flattened (in the order that the data are
stored in memory), resized, and reshaped:
>>> a = np.array([[0, 1], [2, 3]], order='C')
>>> a.resize((2, 1))
>>> a
array([[0],
[1]])
>>> a = np.array([[0, 1], [2, 3]], order='F')
>>> a.resize((2, 1))
>>> a
array([[0],
[2]])
Enlarging an array: as above, but missing entries are filled with zeros:
>>> b = np.array([[0, 1], [2, 3]])
>>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple
>>> b
array([[0, 1, 2],
[3, 0, 0]])
Referencing an array prevents resizing...
>>> c = a
>>> a.resize((1, 1))
Traceback (most recent call last):
...
ValueError: cannot resize an array that has been referenced ...
Unless `refcheck` is False:
>>> a.resize((1, 1), refcheck=False)
>>> a
array([[0]])
>>> c
array([[0]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('round',
"""
a.round(decimals=0, out=None)
Return `a` with each element rounded to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
numpy.around : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted',
"""
a.searchsorted(v, side='left', sorter=None)
Find indices where elements of v should be inserted in a to maintain order.
For full documentation, see `numpy.searchsorted`
See Also
--------
numpy.searchsorted : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield',
"""
a.setfield(val, dtype, offset=0)
Put a value into a specified place in a field defined by a data-type.
Place `val` into `a`'s field defined by `dtype` and beginning `offset`
bytes into the field.
Parameters
----------
val : object
Value to be placed in field.
dtype : dtype object
Data-type of the field in which to place `val`.
offset : int, optional
The number of bytes into the field at which to place `val`.
Returns
-------
None
See Also
--------
getfield
Examples
--------
>>> x = np.eye(3)
>>> x.getfield(np.float64)
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> x.setfield(3, np.int32)
>>> x.getfield(np.int32)
array([[3, 3, 3],
[3, 3, 3],
[3, 3, 3]])
>>> x
array([[ 1.00000000e+000, 1.48219694e-323, 1.48219694e-323],
[ 1.48219694e-323, 1.00000000e+000, 1.48219694e-323],
[ 1.48219694e-323, 1.48219694e-323, 1.00000000e+000]])
>>> x.setfield(np.eye(3), np.int32)
>>> x
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
"""
a.setflags(write=None, align=None, uic=None)
Set array flags WRITEABLE, ALIGNED, (WRITEBACKIFCOPY and UPDATEIFCOPY),
respectively.
These Boolean-valued flags affect how numpy interprets the memory
area used by `a` (see Notes below). The ALIGNED flag can only
be set to True if the data is actually aligned according to the type.
The WRITEBACKIFCOPY and (deprecated) UPDATEIFCOPY flags can never be set
to True. The flag WRITEABLE can only be set to True if the array owns its
own memory, or the ultimate owner of the memory exposes a writeable buffer
interface, or is a string. (The exception for string is made so that
unpickling can be done without copying memory.)
Parameters
----------
write : bool, optional
Describes whether or not `a` can be written to.
align : bool, optional
Describes whether or not `a` is aligned properly for its type.
uic : bool, optional
Describes whether or not `a` is a copy of another "base" array.
Notes
-----
Array flags provide information about how the memory area used
for the array is to be interpreted. There are 7 Boolean flags
in use, only four of which can be changed by the user:
WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED.
WRITEABLE (W) the data area can be written to;
ALIGNED (A) the data and strides are aligned appropriately for the hardware
(as determined by the compiler);
UPDATEIFCOPY (U) (deprecated), replaced by WRITEBACKIFCOPY;
WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced
by .base). When the C-API function PyArray_ResolveWritebackIfCopy is
called, the base array will be updated with the contents of this array.
All flags can be accessed using the single (upper case) letter as well
as the full name.
Examples
--------
>>> y
array([[3, 1, 7],
[2, 0, 0],
[8, 5, 9]])
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : True
ALIGNED : True
WRITEBACKIFCOPY : False
UPDATEIFCOPY : False
>>> y.setflags(write=0, align=0)
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : False
ALIGNED : False
WRITEBACKIFCOPY : False
UPDATEIFCOPY : False
>>> y.setflags(uic=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: cannot set WRITEBACKIFCOPY flag to True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
"""
a.sort(axis=-1, kind='quicksort', order=None)
Sort an array, in-place.
Parameters
----------
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. Default is 'quicksort'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.sort : Return a sorted copy of an array.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in sorted array.
partition: Partial sort.
Notes
-----
See ``sort`` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.array([[1,4], [3,1]])
>>> a.sort(axis=1)
>>> a
array([[1, 4],
[1, 3]])
>>> a.sort(axis=0)
>>> a
array([[1, 3],
[1, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
>>> a.sort(order='y')
>>> a
array([('c', 1), ('a', 2)],
dtype=[('x', '|S1'), ('y', '<i4')])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
"""
a.partition(kth, axis=-1, kind='introselect', order=None)
Rearranges the elements in the array in such a way that the value of the
element in kth position is in the position it would be in a sorted array.
All elements smaller than the kth element are moved before this element and
all equal or greater are moved behind it. The ordering of the elements in
the two partitions is undefined.
.. versionadded:: 1.8.0
Parameters
----------
kth : int or sequence of ints
Element index to partition by. The kth element value will be in its
final sorted position and all smaller elements will be moved before it
and all equal or greater elements behind it.
The order of all elements in the partitions is undefined.
If provided with a sequence of kth it will partition all elements
indexed by kth of them into their sorted position at once.
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need to be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.partition : Return a partitioned copy of an array.
argpartition : Indirect partition.
sort : Full sort.
Notes
-----
See ``np.partition`` for notes on the different algorithms.
Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> a.partition(3)
>>> a
array([2, 1, 3, 4])
>>> a.partition((1, 3))
>>> a
array([1, 2, 3, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
"""
a.squeeze(axis=None)
Remove single-dimensional entries from the shape of `a`.
Refer to `numpy.squeeze` for full documentation.
See Also
--------
numpy.squeeze : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
"""
a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
Returns the standard deviation of the array elements along given axis.
Refer to `numpy.std` for full documentation.
See Also
--------
numpy.std : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
"""
a.sum(axis=None, dtype=None, out=None, keepdims=False)
Return the sum of the array elements over the given axis.
Refer to `numpy.sum` for full documentation.
See Also
--------
numpy.sum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
"""
a.swapaxes(axis1, axis2)
Return a view of the array with `axis1` and `axis2` interchanged.
Refer to `numpy.swapaxes` for full documentation.
See Also
--------
numpy.swapaxes : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
"""
a.take(indices, axis=None, out=None, mode='raise')
Return an array formed from the elements of `a` at the given indices.
Refer to `numpy.take` for full documentation.
See Also
--------
numpy.take : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
"""
a.tofile(fid, sep="", format="%s")
Write array to a file as text or binary (default).
Data is always written in 'C' order, independent of the order of `a`.
The data produced by this method can be recovered using the function
fromfile().
Parameters
----------
fid : file or str
An open file object, or a string containing a filename.
sep : str
Separator between array items for text output.
If "" (empty), a binary file is written, equivalent to
``file.write(a.tobytes())``.
format : str
Format string for text file output.
Each entry in the array is formatted to text by first converting
it to the closest Python type, and then using "format" % item.
Notes
-----
This is a convenience function for quick storage of array data.
Information on endianness and precision is lost, so this method is not a
good choice for files intended to archive data or transport data between
machines with different endianness. Some of these problems can be overcome
by outputting the data as text files, at the expense of speed and file
size.
When fid is a file object, array contents are directly written to the
file, bypassing the file object's ``write`` method. As a result, tofile
cannot be used with file objects supporting compression (e.g., GzipFile)
or file-like objects that do not support ``fileno()`` (e.g., BytesIO).
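Examples
--------
A minimal round-trip sketch; the file path below is created on the fly
and is purely illustrative:
>>> import os, tempfile
>>> a = np.arange(4)
>>> fname = os.path.join(tempfile.mkdtemp(), 'a.bin')
>>> a.tofile(fname)
>>> np.fromfile(fname, dtype=a.dtype)
array([0, 1, 2, 3])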
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
"""
a.tolist()
Return the array as a (possibly nested) list.
Return a copy of the array data as a (nested) Python list.
Data items are converted to the nearest compatible Python type.
Parameters
----------
none
Returns
-------
y : list
The possibly nested list of array elements.
Notes
-----
The array may be recreated, ``a = np.array(a.tolist())``.
Examples
--------
>>> a = np.array([1, 2])
>>> a.tolist()
[1, 2]
>>> a = np.array([[1, 2], [3, 4]])
>>> list(a)
[array([1, 2]), array([3, 4])]
>>> a.tolist()
[[1, 2], [3, 4]]
"""))
tobytesdoc = """
a.{name}(order='C')
Construct Python bytes containing the raw data bytes in the array.
Constructs Python bytes showing a copy of the raw contents of
data memory. The bytes object can be produced in either 'C' or 'Fortran',
or 'Any' order (the default is 'C'-order). 'Any' order means C-order
unless the F_CONTIGUOUS flag in the array is set, in which case it
means 'Fortran' order.
{deprecated}
Parameters
----------
order : {{'C', 'F', None}}, optional
Order of the data for multidimensional arrays:
C, Fortran, or the same as for the original array.
Returns
-------
s : bytes
Python bytes exhibiting a copy of `a`'s raw data.
Examples
--------
>>> x = np.array([[0, 1], [2, 3]])
>>> x.tobytes()
b'\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00'
>>> x.tobytes('C') == x.tobytes()
True
>>> x.tobytes('F')
b'\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00'
"""
add_newdoc('numpy.core.multiarray', 'ndarray',
('tostring', tobytesdoc.format(name='tostring',
deprecated=
'This function is a compatibility '
'alias for tobytes. Despite its '
'name it returns bytes not '
'strings.')))
add_newdoc('numpy.core.multiarray', 'ndarray',
('tobytes', tobytesdoc.format(name='tobytes',
deprecated='.. versionadded:: 1.9.0')))
add_newdoc('numpy.core.multiarray', 'ndarray', ('trace',
"""
a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
Return the sum along diagonals of the array.
Refer to `numpy.trace` for full documentation.
See Also
--------
numpy.trace : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
"""
a.transpose(*axes)
Returns a view of the array with axes transposed.
For a 1-D array, this has no effect. (To change between column and
row vectors, first cast the 1-D array into a matrix object.)
For a 2-D array, this is the usual matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted (see Examples). If axes are not provided and
``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then
``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.
Parameters
----------
axes : None, tuple of ints, or `n` ints
* None or no argument: reverses the order of the axes.
* tuple of ints: `i` in the `j`-th place in the tuple means `a`'s
`i`-th axis becomes `a.transpose()`'s `j`-th axis.
* `n` ints: same as an n-tuple of the same ints (this form is
intended simply as a "convenience" alternative to the tuple form)
Returns
-------
out : ndarray
View of `a`, with axes suitably permuted.
See Also
--------
ndarray.T : Array property returning the array transposed.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> a
array([[1, 2],
[3, 4]])
>>> a.transpose()
array([[1, 3],
[2, 4]])
>>> a.transpose((1, 0))
array([[1, 3],
[2, 4]])
>>> a.transpose(1, 0)
array([[1, 3],
[2, 4]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
"""
a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
Returns the variance of the array elements, along given axis.
Refer to `numpy.var` for full documentation.
See Also
--------
numpy.var : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
"""
a.view(dtype=None, type=None)
New view of array with the same data.
Parameters
----------
dtype : data-type or ndarray sub-class, optional
Data-type descriptor of the returned view, e.g., float32 or int16. The
default, None, results in the view having the same data-type as `a`.
This argument can also be specified as an ndarray sub-class, which
then specifies the type of the returned object (this is equivalent to
setting the ``type`` parameter).
type : Python type, optional
Type of the returned view, e.g., ndarray or matrix. Again, the
default None results in type preservation.
Notes
-----
``a.view()`` is used two different ways:
``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
of the array's memory with a different data-type. This can cause a
reinterpretation of the bytes of memory.
``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
returns an instance of `ndarray_subclass` that looks at the same array
(same shape, dtype, etc.) This does not cause a reinterpretation of the
memory.
For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
bytes per entry than the previous dtype (for example, converting a
regular array to a structured array), then the behavior of the view
cannot be predicted just from the superficial appearance of ``a`` (shown
by ``print(a)``). It also depends on exactly how ``a`` is stored in
memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
defined as a slice or transpose, etc., the view may give different
results.
Examples
--------
>>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
Viewing array data using a different type and dtype:
>>> y = x.view(dtype=np.int16, type=np.matrix)
>>> y
matrix([[513]], dtype=int16)
>>> print(type(y))
<class 'numpy.matrixlib.defmatrix.matrix'>
Creating a view on a structured array so it can be used in calculations
>>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
>>> xv = x.view(dtype=np.int8).reshape(-1,2)
>>> xv
array([[1, 2],
[3, 4]], dtype=int8)
>>> xv.mean(0)
array([ 2., 3.])
Making changes to the view changes the underlying array
>>> xv[0,1] = 20
>>> print(x)
[(1, 20) (3, 4)]
Using a view to convert an array to a recarray:
>>> z = x.view(np.recarray)
>>> z.a
array([1, 3], dtype=int8)
Views share data:
>>> x[0] = (9, 10)
>>> z[0]
(9, 10)
Views that change the dtype size (bytes per entry) should normally be
avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
>>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16)
>>> y = x[:, 0:2]
>>> y
array([[1, 2],
[4, 5]], dtype=int16)
>>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: new type not compatible with array.
>>> z = y.copy()
>>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
array([[(1, 2)],
[(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')])
"""))
##############################################################################
#
# umath functions
#
##############################################################################
add_newdoc('numpy.core.umath', 'frompyfunc',
"""
frompyfunc(func, nin, nout)
Takes an arbitrary Python function and returns a NumPy ufunc.
Can be used, for example, to add broadcasting to a built-in Python
function (see Examples section).
Parameters
----------
func : Python function object
An arbitrary Python function.
nin : int
The number of input arguments.
nout : int
The number of objects returned by `func`.
Returns
-------
out : ufunc
Returns a NumPy universal function (``ufunc``) object.
See Also
--------
vectorize : evaluates pyfunc over input arrays using broadcasting rules of numpy
Notes
-----
The returned ufunc always returns PyObject arrays.
Examples
--------
Use frompyfunc to add broadcasting to the Python function ``oct``:
>>> oct_array = np.frompyfunc(oct, 1, 1)
>>> oct_array(np.array((10, 30, 100)))
array([012, 036, 0144], dtype=object)
>>> np.array((oct(10), oct(30), oct(100))) # for comparison
array(['012', '036', '0144'],
dtype='|S4')
""")
add_newdoc('numpy.core.umath', 'geterrobj',
"""
geterrobj()
Return the current object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in NumPy. `geterrobj` is used internally by the other
functions that get and set error handling behavior (`geterr`, `seterr`,
`geterrcall`, `seterrcall`).
Returns
-------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
seterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterrobj() # first get the defaults
[10000, 0, None]
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> old_bufsize = np.setbufsize(20000)
>>> old_err = np.seterr(divide='raise')
>>> old_handler = np.seterrcall(err_handler)
>>> np.geterrobj()
[20000, 2, <function err_handler at 0x91dcaac>]
>>> old_err = np.seterr(all='ignore')
>>> np.base_repr(np.geterrobj()[1], 8)
'0'
>>> old_err = np.seterr(divide='warn', over='log', under='call',
invalid='print')
>>> np.base_repr(np.geterrobj()[1], 8)
'4351'
""")
add_newdoc('numpy.core.umath', 'seterrobj',
"""
seterrobj(errobj)
Set the object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in NumPy. `seterrobj` is used internally by the other
functions that set error handling behavior (`seterr`, `seterrcall`).
Parameters
----------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
geterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> old_errobj = np.geterrobj() # first get the defaults
>>> old_errobj
[10000, 0, None]
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> new_errobj = [20000, 12, err_handler]
>>> np.seterrobj(new_errobj)
>>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn')
'14'
>>> np.geterr()
{'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'}
>>> np.geterrcall() is err_handler
True
""")
##############################################################################
#
# compiled_base functions
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'bincount',
"""
bincount(x, weights=None, minlength=0)
Count number of occurrences of each value in array of non-negative ints.
The number of bins (of size 1) is one larger than the largest value in
`x`. If `minlength` is specified, there will be at least this number
of bins in the output array (though it will be longer if necessary,
depending on the contents of `x`).
Each bin gives the number of occurrences of its index value in `x`.
If `weights` is specified the input array is weighted by it, i.e. if a
value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead
of ``out[n] += 1``.
Parameters
----------
x : array_like, 1 dimension, nonnegative ints
Input array.
weights : array_like, optional
Weights, array of the same shape as `x`.
minlength : int, optional
A minimum number of bins for the output array.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray of ints
The result of binning the input array.
The length of `out` is equal to ``np.amax(x)+1``.
Raises
------
ValueError
If the input is not 1-dimensional, or contains elements with negative
values, or if `minlength` is negative.
TypeError
If the type of the input is float or complex.
See Also
--------
histogram, digitize, unique
Examples
--------
>>> np.bincount(np.arange(5))
array([1, 1, 1, 1, 1])
>>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
array([1, 3, 1, 1, 0, 0, 0, 1])
>>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
>>> np.bincount(x).size == np.amax(x)+1
True
The input array needs to be of integer dtype, otherwise a
TypeError is raised:
>>> np.bincount(np.arange(5, dtype=float))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: array cannot be safely cast to required type
A possible use of ``bincount`` is to perform sums over
variable-size chunks of an array, using the ``weights`` keyword.
>>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
>>> x = np.array([0, 1, 1, 2, 2, 2])
>>> np.bincount(x, weights=w)
array([ 0.3, 0.7, 1.1])
""")
add_newdoc('numpy.core.multiarray', 'ravel_multi_index',
"""
ravel_multi_index(multi_index, dims, mode='raise', order='C')
Converts a tuple of index arrays into an array of flat
indices, applying boundary modes to the multi-index.
Parameters
----------
multi_index : tuple of array_like
A tuple of integer arrays, one array for each dimension.
dims : tuple of ints
The shape of array into which the indices from ``multi_index`` apply.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices are handled. Can specify
either one mode or a tuple of modes, one mode per index.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
In 'clip' mode, a negative index which would normally
wrap will clip to 0 instead.
order : {'C', 'F'}, optional
Determines whether the multi-index should be viewed as
indexing in row-major (C-style) or column-major
(Fortran-style) order.
Returns
-------
raveled_indices : ndarray
An array of indices into the flattened version of an array
of dimensions ``dims``.
See Also
--------
unravel_index
Notes
-----
.. versionadded:: 1.6.0
Examples
--------
>>> arr = np.array([[3,6,6],[4,5,1]])
>>> np.ravel_multi_index(arr, (7,6))
array([22, 41, 37])
>>> np.ravel_multi_index(arr, (7,6), order='F')
array([31, 41, 13])
>>> np.ravel_multi_index(arr, (4,6), mode='clip')
array([22, 23, 19])
>>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
array([12, 13, 13])
>>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
1621
""")
add_newdoc('numpy.core.multiarray', 'unravel_index',
"""
unravel_index(indices, shape, order='C')
Converts a flat index or array of flat indices into a tuple
of coordinate arrays.
Parameters
----------
indices : array_like
An integer array whose elements are indices into the flattened
version of an array of dimensions ``shape``. Before version 1.6.0,
this function accepted just one index value.
shape : tuple of ints
The shape of the array to use for unraveling ``indices``.
.. versionchanged:: 1.16.0
Renamed from ``dims`` to ``shape``.
order : {'C', 'F'}, optional
Determines whether the indices should be viewed as indexing in
row-major (C-style) or column-major (Fortran-style) order.
.. versionadded:: 1.6.0
Returns
-------
unraveled_coords : tuple of ndarray
Each array in the tuple has the same shape as the ``indices``
array.
See Also
--------
ravel_multi_index
Examples
--------
>>> np.unravel_index([22, 41, 37], (7,6))
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index([31, 41, 13], (7,6), order='F')
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index(1621, (6,7,8,9))
(3, 1, 4, 1)
""")
add_newdoc('numpy.core.multiarray', 'add_docstring',
"""
add_docstring(obj, docstring)
Add a docstring to a built-in obj if possible.
If the obj already has a docstring, raise a RuntimeError.
If this routine does not know how to add a docstring to the object,
raise a TypeError.
""")
add_newdoc('numpy.core.umath', '_add_newdoc_ufunc',
"""
add_ufunc_docstring(ufunc, new_docstring)
Replace the docstring for a ufunc with new_docstring.
This method will only work if the current docstring for
the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.)
Parameters
----------
ufunc : numpy.ufunc
A ufunc whose current doc is NULL.
new_docstring : string
The new docstring for the ufunc.
Notes
-----
This method allocates memory for new_docstring on
the heap. Technically this creates a memory leak, since this
memory will not be reclaimed until the end of the program
even if the ufunc itself is removed. However this will only
be a problem if the user is repeatedly creating ufuncs with
no documentation, adding documentation via add_newdoc_ufunc,
and then throwing away the ufunc.
""")
add_newdoc('numpy.core.multiarray', 'packbits',
"""
packbits(myarray, axis=None)
Packs the elements of a binary-valued array into bits in a uint8 array.
The result is padded to full bytes by inserting zero bits at the end.
Parameters
----------
myarray : array_like
An array of integers or booleans whose elements should be packed to
bits.
axis : int, optional
The dimension over which bit-packing is done.
``None`` implies packing the flattened array.
Returns
-------
packed : ndarray
Array of type uint8 whose elements represent bits corresponding to the
logical (0 or nonzero) value of the input elements. The shape of
`packed` has the same number of dimensions as the input (unless `axis`
is None, in which case the output is 1-D).
See Also
--------
unpackbits: Unpacks elements of a uint8 array into a binary-valued output
array.
Examples
--------
>>> a = np.array([[[1,0,1],
... [0,1,0]],
... [[1,1,0],
... [0,0,1]]])
>>> b = np.packbits(a, axis=-1)
>>> b
array([[[160],[64]],[[192],[32]]], dtype=uint8)
Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
and 32 = 0010 0000.
""")
add_newdoc('numpy.core.multiarray', 'unpackbits',
"""
unpackbits(myarray, axis=None)
Unpacks elements of a uint8 array into a binary-valued output array.
Each element of `myarray` represents a bit-field that should be unpacked
into a binary-valued output array. The shape of the output array is either
1-D (if `axis` is None) or the same shape as the input array with unpacking
done along the axis specified.
Parameters
----------
myarray : ndarray, uint8 type
Input array.
axis : int, optional
The dimension over which bit-unpacking is done.
``None`` implies unpacking the flattened array.
Returns
-------
unpacked : ndarray, uint8 type
The elements are binary-valued (0 or 1).
See Also
--------
packbits : Packs the elements of a binary-valued array into bits in a uint8
array.
Examples
--------
>>> a = np.array([[2], [7], [23]], dtype=np.uint8)
>>> a
array([[ 2],
[ 7],
[23]], dtype=uint8)
>>> b = np.unpackbits(a, axis=1)
>>> b
array([[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
""")
add_newdoc('numpy.core._multiarray_tests', 'format_float_OSprintf_g',
"""
format_float_OSprintf_g(val, precision)
Print a floating point scalar using the system's printf function,
equivalent to:
printf("%.*g", precision, val);
for half/float/double, or replacing 'g' by 'Lg' for longdouble. This
method is designed to help cross-validate the format_float_* methods.
Parameters
----------
val : python float or numpy floating scalar
Value to format.
precision : non-negative integer, optional
Precision given to printf.
Returns
-------
rep : string
The string representation of the floating point value
See Also
--------
format_float_scientific
format_float_positional
""")
##############################################################################
#
# Documentation for ufunc attributes and methods
#
##############################################################################
##############################################################################
#
# ufunc object
#
##############################################################################
add_newdoc('numpy.core', 'ufunc',
"""
Functions that operate element by element on whole arrays.
To see the documentation for a specific ufunc, use `info`. For
example, ``np.info(np.sin)``. Because ufuncs are written in C
(for speed) and linked into Python with NumPy's ufunc facility,
Python's help() function finds this page whenever help() is called
on a ufunc.
A detailed explanation of ufuncs can be found in the docs for :ref:`ufuncs`.
Calling ufuncs:
===============
op(*x[, out], where=True, **kwargs)
Apply `op` to the arguments `*x` elementwise, broadcasting the arguments.
The broadcasting rules are:
* Dimensions of length 1 may be prepended to either array.
* Arrays may be repeated along dimensions of length 1.
Parameters
----------
*x : array_like
Input arrays.
out : ndarray, None, or tuple of ndarray and None, optional
Alternate array object(s) in which to put the result; if provided, it
must have a shape that the inputs broadcast to. A tuple of arrays
(possible only as a keyword argument) must have length equal to the
number of outputs; use `None` for uninitialized outputs to be
allocated by the ufunc.
where : array_like, optional
Values of True indicate to calculate the ufunc at that position, values
of False indicate to leave the value in the output alone. Note that if
an uninitialized return array is created via the default ``out=None``,
then the elements where the values are False will remain uninitialized.
**kwargs
For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`.
Returns
-------
r : ndarray or tuple of ndarray
`r` will have the shape that the arrays in `x` broadcast to; if `out` is
provided, it will be returned. If not, `r` will be allocated and
may contain uninitialized values. If the function has more than one
output, then the result will be a tuple of arrays.
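Examples
--------
A short sketch of elementwise application with broadcasting:
>>> np.add(np.arange(3), 5)                    # scalar is broadcast
array([5, 6, 7])
>>> np.add(np.arange(6).reshape(2, 3), np.arange(3))
array([[0, 2, 4],
       [3, 5, 7]])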
""")
##############################################################################
#
# ufunc attributes
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('identity',
"""
The identity value.
Data attribute containing the identity element for the ufunc, if it has one.
If it does not, the attribute value is None.
Examples
--------
>>> np.add.identity
0
>>> np.multiply.identity
1
>>> np.power.identity
1
>>> print(np.exp.identity)
None
"""))
add_newdoc('numpy.core', 'ufunc', ('nargs',
"""
The number of arguments.
Data attribute containing the number of arguments the ufunc takes, including
optional ones.
Notes
-----
Typically this value will be one more than what you might expect because all
ufuncs take the optional "out" argument.
Examples
--------
>>> np.add.nargs
3
>>> np.multiply.nargs
3
>>> np.power.nargs
3
>>> np.exp.nargs
2
"""))
add_newdoc('numpy.core', 'ufunc', ('nin',
"""
The number of inputs.
Data attribute containing the number of arguments the ufunc treats as input.
Examples
--------
>>> np.add.nin
2
>>> np.multiply.nin
2
>>> np.power.nin
2
>>> np.exp.nin
1
"""))
add_newdoc('numpy.core', 'ufunc', ('nout',
"""
The number of outputs.
Data attribute containing the number of arguments the ufunc treats as output.
Notes
-----
Since all ufuncs can take output arguments, this will always be (at least) 1.
Examples
--------
>>> np.add.nout
1
>>> np.multiply.nout
1
>>> np.power.nout
1
>>> np.exp.nout
1
"""))
add_newdoc('numpy.core', 'ufunc', ('ntypes',
"""
The number of types.
The number of numerical NumPy types - of which there are 18 total - on which
the ufunc can operate.
See Also
--------
numpy.ufunc.types
Examples
--------
>>> np.add.ntypes
18
>>> np.multiply.ntypes
18
>>> np.power.ntypes
17
>>> np.exp.ntypes
7
>>> np.remainder.ntypes
14
"""))
add_newdoc('numpy.core', 'ufunc', ('types',
"""
Returns a list with types grouped input->output.
Data attribute listing the data-type "Domain-Range" groupings the ufunc can
deliver. The data-types are given using the character codes.
See Also
--------
numpy.ufunc.ntypes
Examples
--------
>>> np.add.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.multiply.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.power.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G',
'OO->O']
>>> np.exp.types
['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O']
>>> np.remainder.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O']
"""))
add_newdoc('numpy.core', 'ufunc', ('signature',
"""
Definition of the core elements a generalized ufunc operates on.
The signature determines how the dimensions of each input/output array
are split into core and loop dimensions:
1. Each dimension in the signature is matched to a dimension of the
corresponding passed-in array, starting from the end of the shape tuple.
2. Core dimensions assigned to the same label in the signature must have
exactly matching sizes, no broadcasting is performed.
3. The core dimensions are removed from all inputs and the remaining
dimensions are broadcast together, defining the loop dimensions.
Notes
-----
Generalized ufuncs are used internally in many linalg functions, and in
the testing suite; the examples below are taken from these.
For ufuncs that operate on scalars, the signature is `None`, which is
equivalent to '()' for every argument.
Examples
--------
>>> np.core.umath_tests.matrix_multiply.signature
'(m,n),(n,p)->(m,p)'
>>> np.linalg._umath_linalg.det.signature
'(m,m)->()'
>>> np.add.signature is None
True # equivalent to '(),()->()'
"""))
##############################################################################
#
# ufunc methods
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('reduce',
"""
reduce(a, axis=0, dtype=None, out=None, keepdims=False, initial)
Reduces `a`'s dimension by one, by applying ufunc along one axis.
Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then
:math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
For a one-dimensional array, reduce produces results equivalent to:
::
r = op.identity # op = ufunc
for i in range(len(A)):
r = op(r, A[i])
return r
For example, add.reduce() is equivalent to sum().
Parameters
----------
a : array_like
The array to act on.
axis : None or int or tuple of ints, optional
Axis or axes along which a reduction is performed.
The default (`axis` = 0) is to perform a reduction over the first
dimension of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is `None`, a reduction is performed over all the axes.
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
For operations which are either not commutative or not associative,
doing a reduction over multiple axes is not well-defined. The
ufuncs do not currently raise an exception in this case, but will
likely do so in the future.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data-type of the output array if this is provided, or
the data-type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or `None`,
a freshly-allocated array is returned. For consistency with
:ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
Tuples are allowed for keyword argument.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.7.0
initial : scalar, optional
The value with which to start the reduction.
If the ufunc has no identity or the dtype is object, this defaults
to None - otherwise it defaults to ufunc.identity.
If ``None`` is given, the first element of the reduction is used,
and an error is thrown if the reduction is empty.
.. versionadded:: 1.15.0
Returns
-------
r : ndarray
The reduced array. If `out` was supplied, `r` is a reference to it.
Examples
--------
>>> np.multiply.reduce([2,3,5])
30
A multi-dimensional array example:
>>> X = np.arange(8).reshape((2,2,2))
>>> X
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.add.reduce(X, 0)
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X) # confirm: default axis value is 0
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X, 1)
array([[ 2, 4],
[10, 12]])
>>> np.add.reduce(X, 2)
array([[ 1, 5],
[ 9, 13]])
You can use the ``initial`` keyword argument to initialize the reduction with a
different value.
>>> np.add.reduce([10], initial=5)
15
>>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10)
array([14., 14.])
Allows reductions of empty arrays where they would normally fail, i.e.
for ufuncs without an identity.
>>> np.minimum.reduce([], initial=np.inf)
inf
>>> np.minimum.reduce([])
Traceback (most recent call last):
...
ValueError: zero-size array to reduction operation minimum which has no identity
"""))
add_newdoc('numpy.core', 'ufunc', ('accumulate',
"""
accumulate(array, axis=0, dtype=None, out=None)
Accumulate the result of applying the operator to all elements.
For a one-dimensional array, accumulate produces results equivalent to::
r = np.empty(len(A))
t = op.identity # op = the ufunc being applied to A's elements
for i in range(len(A)):
t = op(t, A[i])
r[i] = t
return r
For example, add.accumulate() is equivalent to np.cumsum().
For a multi-dimensional array, accumulate is applied along only one
axis (axis zero by default; see Examples below) so repeated use is
necessary if one wants to accumulate over multiple axes.
Parameters
----------
array : array_like
The array to act on.
axis : int, optional
The axis along which to apply the accumulation; default is zero.
dtype : data-type code, optional
The data-type used to represent the intermediate results. Defaults
to the data-type of the output array if such is provided, or the
data-type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or `None`,
a freshly-allocated array is returned. For consistency with
:ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
Tuples are allowed for keyword argument.
Returns
-------
r : ndarray
The accumulated values. If `out` was supplied, `r` is a reference to
`out`.
Examples
--------
1-D array examples:
>>> np.add.accumulate([2, 3, 5])
array([ 2, 5, 10])
>>> np.multiply.accumulate([2, 3, 5])
array([ 2, 6, 30])
2-D array examples:
>>> I = np.eye(2)
>>> I
array([[ 1., 0.],
[ 0., 1.]])
Accumulate along axis 0 (rows), down columns:
>>> np.add.accumulate(I, 0)
array([[ 1., 0.],
[ 1., 1.]])
>>> np.add.accumulate(I) # no axis specified = axis zero
array([[ 1., 0.],
[ 1., 1.]])
Accumulate along axis 1 (columns), through rows:
>>> np.add.accumulate(I, 1)
array([[ 1., 1.],
[ 0., 1.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('reduceat',
"""
reduceat(a, indices, axis=0, dtype=None, out=None)
Performs a (local) reduce with specified slices over a single axis.
For i in ``range(len(indices))``, `reduceat` computes
``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th
generalized "row" parallel to `axis` in the final result (i.e., in a
2-D array, for example, if `axis = 0`, it becomes the i-th row, but if
`axis = 1`, it becomes the i-th column). There are three exceptions to this:
* when ``i = len(indices) - 1`` (so for the last index),
``indices[i+1] = a.shape[axis]``.
* if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is
simply ``a[indices[i]]``.
* if ``indices[i] >= len(a)`` or ``indices[i] < 0``, an error is raised.
The shape of the output depends on the size of `indices`, and may be
larger than `a` (this happens if ``len(indices) > a.shape[axis]``).
Parameters
----------
a : array_like
The array to act on.
indices : array_like
Paired indices, comma separated (not colon), specifying slices to
reduce.
axis : int, optional
The axis along which to apply the reduceat.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data type of the output array if this is provided, or
the data type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or `None`,
a freshly-allocated array is returned. For consistency with
:ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
Tuples are allowed for keyword argument.
Returns
-------
r : ndarray
The reduced values. If `out` was supplied, `r` is a reference to
`out`.
Notes
-----
A descriptive example:
If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as
``ufunc.reduceat(a, indices)[::2]`` where `indices` is
``range(len(a) - 1)`` with a zero placed
in every other element:
``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``.
Don't be fooled by this attribute's name: `reduceat(a)` is not
necessarily smaller than `a`.
Examples
--------
To take the running sum of four successive values:
>>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
array([ 6, 10, 14, 18])
A 2-D example:
>>> x = np.linspace(0, 15, 16).reshape(4,4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
::
# reduce such that the result has the following five rows:
# [row1 + row2 + row3]
# [row4]
# [row2]
# [row3]
# [row1 + row2 + row3 + row4]
>>> np.add.reduceat(x, [0, 3, 1, 2, 0])
array([[ 12., 15., 18., 21.],
[ 12., 13., 14., 15.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 24., 28., 32., 36.]])
::
# reduce such that result has the following two columns:
# [col1 * col2 * col3, col4]
>>> np.multiply.reduceat(x, [0, 3], 1)
array([[ 0., 3.],
[ 120., 7.],
[ 720., 11.],
[ 2184., 15.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('outer',
"""
outer(A, B, **kwargs)
Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`.
Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of
``op.outer(A, B)`` is an array of dimension M + N such that:
.. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] =
op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}])
For `A` and `B` one-dimensional, this is equivalent to::
r = empty((len(A), len(B)))
for i in range(len(A)):
for j in range(len(B)):
r[i,j] = op(A[i], B[j]) # op = ufunc in question
Parameters
----------
A : array_like
First array
B : array_like
Second array
kwargs : any
Arguments to pass on to the ufunc. Typically `dtype` or `out`.
Returns
-------
r : ndarray
Output array
See Also
--------
numpy.outer
Examples
--------
>>> np.multiply.outer([1, 2, 3], [4, 5, 6])
array([[ 4, 5, 6],
[ 8, 10, 12],
[12, 15, 18]])
A multi-dimensional example:
>>> A = np.array([[1, 2, 3], [4, 5, 6]])
>>> A.shape
(2, 3)
>>> B = np.array([[1, 2, 3, 4]])
>>> B.shape
(1, 4)
>>> C = np.multiply.outer(A, B)
>>> C.shape; C
(2, 3, 1, 4)
array([[[[ 1, 2, 3, 4]],
[[ 2, 4, 6, 8]],
[[ 3, 6, 9, 12]]],
[[[ 4, 8, 12, 16]],
[[ 5, 10, 15, 20]],
[[ 6, 12, 18, 24]]]])
"""))
add_newdoc('numpy.core', 'ufunc', ('at',
"""
at(a, indices, b=None)
Performs unbuffered in place operation on operand 'a' for elements
specified by 'indices'. For addition ufunc, this method is equivalent to
``a[indices] += b``, except that results are accumulated for elements that
are indexed more than once. For example, ``a[[0,0]] += 1`` will only
increment the first element once because of buffering, whereas
``add.at(a, [0,0], 1)`` will increment the first element twice.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
The array to perform in place operation on.
indices : array_like or tuple
Array like index object or slice object for indexing into first
operand. If first operand has multiple dimensions, indices can be a
tuple of array like index objects or slice objects.
b : array_like
Second operand for ufuncs requiring two operands. Operand must be
broadcastable over first operand after indexing or slicing.
Examples
--------
Set items 0 and 1 to their negative values:
>>> a = np.array([1, 2, 3, 4])
>>> np.negative.at(a, [0, 1])
>>> a
array([-1, -2, 3, 4])
Increment items 0 and 1, and increment item 2 twice:
>>> a = np.array([1, 2, 3, 4])
>>> np.add.at(a, [0, 1, 2, 2], 1)
>>> a
array([2, 3, 5, 4])
Add items 0 and 1 in first array to second array,
and store results in first array:
>>> a = np.array([1, 2, 3, 4])
>>> b = np.array([1, 2])
>>> np.add.at(a, [0, 1], b)
>>> a
array([2, 4, 3, 4])
"""))
##############################################################################
#
# Documentation for dtype attributes and methods
#
##############################################################################
##############################################################################
#
# dtype object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype',
"""
dtype(obj, align=False, copy=False)
Create a data type object.
A numpy array is homogeneous, and contains elements described by a
dtype object. A dtype object can be constructed from different
combinations of fundamental numeric types.
Parameters
----------
obj
Object to be converted to a data type object.
align : bool, optional
Add padding to the fields to match what a C compiler would output
for a similar C-struct. Can be ``True`` only if `obj` is a dictionary
or a comma-separated string. If a struct dtype is being created,
this also sets a sticky alignment flag ``isalignedstruct``.
copy : bool, optional
Make a new copy of the data-type object. If ``False``, the result
may just be a reference to a built-in data-type object.
See also
--------
result_type
Examples
--------
Using array-scalar type:
>>> np.dtype(np.int16)
dtype('int16')
Structured type, one field name 'f1', containing int16:
>>> np.dtype([('f1', np.int16)])
dtype([('f1', '<i2')])
Structured type, one field named 'f1', in itself containing a structured
type with one field:
>>> np.dtype([('f1', [('f1', np.int16)])])
dtype([('f1', [('f1', '<i2')])])
Structured type, two fields: the first field contains an unsigned int, the
second an int32:
>>> np.dtype([('f1', np.uint), ('f2', np.int32)])
dtype([('f1', '<u4'), ('f2', '<i4')])
Using array-protocol type strings:
>>> np.dtype([('a','f8'),('b','S10')])
dtype([('a', '<f8'), ('b', '|S10')])
Using comma-separated field formats. The shape is (2,3):
>>> np.dtype("i4, (2,3)f8")
dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void``
is a flexible type, here of size 10:
>>> np.dtype([('hello',(int,3)),('world',np.void,10)])
dtype([('hello', '<i4', 3), ('world', '|V10')])
Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are
the offsets in bytes:
>>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
dtype(('<i2', [('x', '|i1'), ('y', '|i1')]))
Using dictionaries. Two fields named 'gender' and 'age':
>>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
dtype([('gender', '|S1'), ('age', '|u1')])
Offsets in bytes, here 0 and 25:
>>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
dtype([('surname', '|S25'), ('age', '|u1')])
""")
##############################################################################
#
# dtype attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('alignment',
"""
The required alignment (bytes) of this data-type according to the compiler.
More information is available in the C-API section of the manual.
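Examples
--------
Illustrative values only; alignment is compiler- and platform-dependent,
and the numbers below are what a typical x86-64 build reports:
>>> np.dtype('i4').alignment
4
>>> np.dtype('f8').alignment
8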
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder',
"""
A character indicating the byte-order of this data-type object.
One of:
=== ==============
'=' native
'<' little-endian
'>' big-endian
'|' not applicable
=== ==============
All built-in data-type objects have byteorder either '=' or '|'.
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.byteorder
'='
>>> # endian is not relevant for 8 bit numbers
>>> np.dtype('i1').byteorder
'|'
>>> # or ASCII strings
>>> np.dtype('S2').byteorder
'|'
>>> # Even if specific code is given, and it is native
>>> # '=' is the byteorder
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> dt = np.dtype(native_code + 'i2')
>>> dt.byteorder
'='
>>> # Swapped code shows up as itself
>>> dt = np.dtype(swapped_code + 'i2')
>>> dt.byteorder == swapped_code
True
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('char',
"""A unique character code for each of the 21 different built-in types."""))
add_newdoc('numpy.core.multiarray', 'dtype', ('descr',
"""
PEP3118 interface description of the data-type.
The format is that required by the 'descr' key in the
PEP3118 `__array_interface__` attribute.
Warning: This attribute exists specifically for PEP3118 compliance, and
is not a datatype description compatible with `np.dtype`.
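Examples
--------
A small sketch using an explicitly little-endian structured dtype, so
the output does not depend on the machine's byte order:
>>> np.dtype([('a', '<i4'), ('b', '<f8')]).descr
[('a', '<i4'), ('b', '<f8')]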
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
"""
Dictionary of named fields defined for this data type, or ``None``.
The dictionary is indexed by keys that are the names of the fields.
Each entry in the dictionary is a tuple fully describing the field::
(dtype, offset[, title])
Offset is limited to C int, which is signed and usually 32 bits.
If present, the optional title can be any object (if it is a string
or unicode then it will also be a key in the fields dictionary,
otherwise it's meta-data). Notice also that the first two elements
of the tuple can be passed directly as arguments to the ``ndarray.getfield``
and ``ndarray.setfield`` methods.
See Also
--------
ndarray.getfield, ndarray.setfield
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> print(dt.fields)
{'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)}
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('flags',
"""
Bit-flags describing how this data type is to be interpreted.
Bit-masks are in `numpy.core.multiarray` as the constants
`ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`,
`NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation
of these flags is in C-API documentation; they are largely useful
for user-defined data-types.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject',
"""
Boolean indicating whether this dtype contains any reference-counted
objects in any fields or sub-dtypes.
Recall that what is actually in the ndarray memory representing
the Python object is the memory address of that object (a pointer).
Special handling may be required, and this attribute is useful for
distinguishing data types that may contain arbitrary Python objects
and data-types that won't.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin',
"""
Integer indicating how this dtype relates to the built-in dtypes.
Read-only.
= ========================================================================
0 if this is a structured array type, with fields
1 if this is a dtype compiled into numpy (such as ints, floats etc)
2 if the dtype is for a user-defined numpy type
A user-defined type uses the numpy C-API machinery to extend
numpy to handle a new array type. See
:ref:`user.user-defined-data-types` in the NumPy manual.
= ========================================================================
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.isbuiltin
1
>>> dt = np.dtype('f8')
>>> dt.isbuiltin
1
>>> dt = np.dtype([('field1', 'f8')])
>>> dt.isbuiltin
0
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isnative',
"""
Boolean indicating whether the byte order of this dtype is native
to the platform.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct',
"""
Boolean indicating whether the dtype is a struct which maintains
field alignment. This flag is sticky, so when combining multiple
structs together, it is preserved and produces new dtypes which
are also aligned.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize',
"""
The element size of this data-type object.
For 18 of the 21 types this number is fixed by the data-type.
For the flexible data-types, this number can be anything.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
"""
A character code (one of 'biufcmMOSUV') identifying the general kind of data.
= ======================
b boolean
i signed integer
u unsigned integer
f floating-point
c complex floating-point
m timedelta
M datetime
O object
S (byte-)string
U Unicode
V void
= ======================
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('name',
"""
A bit-width name for this data-type.
Un-sized flexible data-type objects do not have this attribute.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('names',
"""
Ordered list of field names, or ``None`` if there are no fields.
The names are ordered according to increasing byte offset. This can be
used, for example, to walk through all of the named fields in offset order.
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.names
('name', 'grades')
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('num',
"""
A unique number for each of the 21 different built-in types.
These are roughly ordered from least-to-most precision.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('shape',
"""
Shape tuple of the sub-array if this data type describes a sub-array,
and ``()`` otherwise.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('ndim',
"""
Number of dimensions of the sub-array if this data type describes a
sub-array, and ``0`` otherwise.
.. versionadded:: 1.13.0
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('str',
"""The array-protocol typestring of this data-type object."""))
add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype',
"""
Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and
None otherwise.
The *shape* is the fixed shape of the sub-array described by this
data type, and *item_dtype* the data type of the array.
If a field whose dtype object has this attribute is retrieved,
then the extra dimensions implied by *shape* are tacked on to
the end of the retrieved array.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('type',
"""The type object used to instantiate a scalar of this data-type."""))
##############################################################################
#
# dtype methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder',
"""
newbyteorder(new_order='S')
Return a new dtype with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
below. The default value ('S') results in swapping the current
byte order. `new_order` codes can be any of:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
The code does a case-insensitive check on the first letter of
`new_order` for these alternatives. For example, any of '>'
or 'B' or 'b' or 'brian' are valid to specify big-endian.
Returns
-------
new_dtype : dtype
New dtype object with the given change to the byte order.
Notes
-----
Changes are also made in all fields and sub-arrays of the data type.
Examples
--------
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> native_dt = np.dtype(native_code+'i2')
>>> swapped_dt = np.dtype(swapped_code+'i2')
>>> native_dt.newbyteorder('S') == swapped_dt
True
>>> native_dt.newbyteorder() == swapped_dt
True
>>> native_dt == swapped_dt.newbyteorder('S')
True
>>> native_dt == swapped_dt.newbyteorder('=')
True
>>> native_dt == swapped_dt.newbyteorder('N')
True
>>> native_dt == native_dt.newbyteorder('|')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('<')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('L')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('>')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('B')
True
"""))
##############################################################################
#
# Datetime-related Methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'busdaycalendar',
"""
busdaycalendar(weekmask='1111100', holidays=None)
A business day calendar object that efficiently stores information
defining valid days for the busday family of functions.
The default valid days are Monday through Friday ("business days").
A busdaycalendar object can be specified with any set of weekly
valid days, plus an optional set of "holiday" dates that will always be invalid.
Once a busdaycalendar object is created, the weekmask and holidays
cannot be modified.
.. versionadded:: 1.7.0
Parameters
----------
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates, no matter which
weekday they fall upon. Holiday dates may be specified in any
order, and NaT (not-a-time) dates are ignored. This list is
saved in a normalized form that is suited for fast calculations
of valid days.
Returns
-------
out : busdaycalendar
A business day calendar object containing the specified
weekmask and holidays values.
See Also
--------
is_busday : Returns a boolean array indicating valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Attributes
----------
Note: once a busdaycalendar object is created, you cannot modify the
weekmask or holidays. The attributes return copies of internal data.
weekmask : (copy) seven-element array of bool
holidays : (copy) sorted array of datetime64[D]
Examples
--------
>>> # Some important days in July
... bdd = np.busdaycalendar(
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
>>> # Default is Monday to Friday weekdays
... bdd.weekmask
array([ True, True, True, True, True, False, False], dtype='bool')
>>> # Any holidays already on the weekend are removed
... bdd.holidays
array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]')
""")
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask',
"""A copy of the seven-element boolean mask indicating valid days."""))
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays',
"""A copy of the holiday array indicating additional invalid days."""))
add_newdoc('numpy.core.multiarray', 'normalize_axis_index',
"""
normalize_axis_index(axis, ndim, msg_prefix=None)
Normalizes an axis index, `axis`, such that it is a valid positive index into
the shape of array with `ndim` dimensions. Raises an AxisError with an
appropriate message if this is not possible.
Used internally by all axis-checking logic.
.. versionadded:: 1.13.0
Parameters
----------
axis : int
The un-normalized index of the axis. Can be negative
ndim : int
The number of dimensions of the array that `axis` should be normalized
against
msg_prefix : str
A prefix to put before the message, typically the name of the argument
Returns
-------
normalized_axis : int
The normalized axis index, such that `0 <= normalized_axis < ndim`
Raises
------
AxisError
If the axis index is invalid, when `-ndim <= axis < ndim` is false.
Examples
--------
>>> normalize_axis_index(0, ndim=3)
0
>>> normalize_axis_index(1, ndim=3)
1
>>> normalize_axis_index(-1, ndim=3)
2
>>> normalize_axis_index(3, ndim=3)
Traceback (most recent call last):
...
AxisError: axis 3 is out of bounds for array of dimension 3
>>> normalize_axis_index(-4, ndim=3, msg_prefix='axes_arg')
Traceback (most recent call last):
...
AxisError: axes_arg: axis -4 is out of bounds for array of dimension 3
""")
add_newdoc('numpy.core.multiarray', 'datetime_data',
"""
datetime_data(dtype, /)
Get information about the step size of a date or time type.
The returned tuple can be passed as the second argument of `numpy.datetime64` and
`numpy.timedelta64`.
Parameters
----------
dtype : dtype
The dtype object, which must be a `datetime64` or `timedelta64` type.
Returns
-------
unit : str
The :ref:`datetime unit <arrays.dtypes.dateunits>` on which this dtype
is based.
count : int
The number of base units in a step.
Examples
--------
>>> dt_25s = np.dtype('timedelta64[25s]')
>>> np.datetime_data(dt_25s)
('s', 25)
>>> np.array(10, dt_25s).astype('timedelta64[s]')
array(250, dtype='timedelta64[s]')
The result can be used to construct a datetime that uses the same units
as a timedelta
>>> np.datetime64('2010', np.datetime_data(dt_25s))
numpy.datetime64('2010-01-01T00:00:00', '25s')
""")
##############################################################################
#
# Documentation for `generic` attributes and methods
#
##############################################################################
add_newdoc('numpy.core.numerictypes', 'generic',
"""
Base class for numpy scalar types.
Class from which most (all?) numpy scalar types are derived. For
consistency, exposes the same API as `ndarray`, despite many
consequent attributes being either "get-only," or completely irrelevant.
This is the class from which it is strongly suggested users should derive
custom scalar types.
""")
# Attributes
add_newdoc('numpy.core.numerictypes', 'generic', ('T',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('base',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('data',
"""Pointer to start of data."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dtype',
"""Get array data-descriptor."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flags',
"""The integer value of flags."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flat',
"""A 1-D view of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('imag',
"""The imaginary part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize',
"""The length of one element in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes',
"""The length of the scalar in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ndim',
"""The number of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('real',
"""The real part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('shape',
"""Tuple of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('size',
"""The number of elements in the gentype."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('strides',
"""Tuple of bytes steps in each dimension."""))
# Methods
add_newdoc('numpy.core.numerictypes', 'generic', ('all',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('any',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argmax',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argmin',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argsort',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('astype',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('choose',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('clip',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('compress',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('copy',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dump',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dumps',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('fill',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flatten',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('getfield',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('item',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('itemset',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('max',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('mean',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('min',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder',
"""
newbyteorder(new_order='S')
Return a new `dtype` with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
The `new_order` code can be any from the following:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
Parameters
----------
new_order : str, optional
Byte order to force; a value from the byte order specifications
above. The default value ('S') results in swapping the current
byte order. The code does a case-insensitive check on the first
letter of `new_order` for the alternatives above. For example,
any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
Returns
-------
new_dtype : dtype
New `dtype` object with the given change to the byte order.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('nonzero',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('prod',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ptp',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('put',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ravel',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('repeat',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('reshape',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('resize',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('round',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('searchsorted',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('setfield',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('setflags',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('sort',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('squeeze',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('std',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('sum',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('swapaxes',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('take',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tofile',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tolist',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tostring',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('trace',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('transpose',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('var',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('view',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
##############################################################################
#
# Documentation for scalar type abstract base classes in type hierarchy
#
##############################################################################
add_newdoc('numpy.core.numerictypes', 'number',
"""
Abstract base class of all numeric scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'integer',
"""
Abstract base class of all integer scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'signedinteger',
"""
Abstract base class of all signed integer scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'unsignedinteger',
"""
Abstract base class of all unsigned integer scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'inexact',
"""
Abstract base class of all numeric scalar types with a (potentially)
inexact representation of the values in its range, such as
floating-point numbers.
""")
add_newdoc('numpy.core.numerictypes', 'floating',
"""
Abstract base class of all floating-point scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'complexfloating',
"""
Abstract base class of all complex number scalar types that are made up of
floating-point numbers.
""")
add_newdoc('numpy.core.numerictypes', 'flexible',
"""
Abstract base class of all scalar types without predefined length.
The actual size of these types depends on the specific `np.dtype`
instantiation.
""")
add_newdoc('numpy.core.numerictypes', 'character',
"""
Abstract base class of all character string scalar types.
""")
##############################################################################
#
# Documentation for concrete scalar classes
#
##############################################################################
def numeric_type_aliases(aliases):
def type_aliases_gen():
for alias, doc in aliases:
try:
alias_type = getattr(_numerictypes, alias)
except AttributeError:
# The set of aliases that actually exist varies between platforms
pass
else:
yield (alias_type, alias, doc)
return list(type_aliases_gen())
possible_aliases = numeric_type_aliases([
('int8', '8-bit signed integer (-128 to 127)'),
('int16', '16-bit signed integer (-32768 to 32767)'),
('int32', '32-bit signed integer (-2147483648 to 2147483647)'),
('int64', '64-bit signed integer (-9223372036854775808 to 9223372036854775807)'),
('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'),
('uint8', '8-bit unsigned integer (0 to 255)'),
('uint16', '16-bit unsigned integer (0 to 65535)'),
('uint32', '32-bit unsigned integer (0 to 4294967295)'),
('uint64', '64-bit unsigned integer (0 to 18446744073709551615)'),
('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'),
('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'),
('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'),
('float64', '64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'),
('float96', '96-bit extended-precision floating-point number type'),
('float128', '128-bit extended-precision floating-point number type'),
('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'),
('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'),
('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'),
('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'),
])
def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
o = getattr(_numerictypes, obj)
character_code = dtype(o).char
canonical_name_doc = "" if obj == o.__name__ else "Canonical name: ``np.{}``.\n ".format(obj)
alias_doc = ''.join("Alias: ``np.{}``.\n ".format(alias) for alias in fixed_aliases)
alias_doc += ''.join("Alias *on this platform*: ``np.{}``: {}.\n ".format(alias, doc)
for (alias_type, alias, doc) in possible_aliases if alias_type is o)
docstring = """
{doc}
Character code: ``'{character_code}'``.
{canonical_name_doc}{alias_doc}
""".format(doc=doc.strip(), character_code=character_code,
canonical_name_doc=canonical_name_doc, alias_doc=alias_doc)
add_newdoc('numpy.core.numerictypes', obj, docstring)
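# Rough sketch of the kind of docstring the helper above generates (the character
# code and platform aliases vary; the text below is only a hypothetical rendering
# for np.intc on a typical platform):
#
#     Signed integer type, compatible with C ``int``.
#     Character code: ``'i'``.
#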
add_newdoc_for_scalar_type('bool_', ['bool8'],
"""
Boolean type (True or False), stored as a byte.
""")
add_newdoc_for_scalar_type('byte', [],
"""
Signed integer type, compatible with C ``char``.
""")
add_newdoc_for_scalar_type('short', [],
"""
Signed integer type, compatible with C ``short``.
""")
add_newdoc_for_scalar_type('intc', [],
"""
Signed integer type, compatible with C ``int``.
""")
add_newdoc_for_scalar_type('int_', [],
"""
Signed integer type, compatible with Python `int` and C ``long``.
""")
add_newdoc_for_scalar_type('longlong', [],
"""
Signed integer type, compatible with C ``long long``.
""")
add_newdoc_for_scalar_type('ubyte', [],
"""
Unsigned integer type, compatible with C ``unsigned char``.
""")
add_newdoc_for_scalar_type('ushort', [],
"""
Unsigned integer type, compatible with C ``unsigned short``.
""")
add_newdoc_for_scalar_type('uintc', [],
"""
Unsigned integer type, compatible with C ``unsigned int``.
""")
add_newdoc_for_scalar_type('uint', [],
"""
Unsigned integer type, compatible with C ``unsigned long``.
""")
add_newdoc_for_scalar_type('ulonglong', [],
"""
Unsigned integer type, compatible with C ``unsigned long long``.
""")
add_newdoc_for_scalar_type('half', [],
"""
Half-precision floating-point number type.
""")
add_newdoc_for_scalar_type('single', [],
"""
Single-precision floating-point number type, compatible with C ``float``.
""")
add_newdoc_for_scalar_type('double', ['float_'],
"""
Double-precision floating-point number type, compatible with Python `float`
and C ``double``.
""")
add_newdoc_for_scalar_type('longdouble', ['longfloat'],
"""
Extended-precision floating-point number type, compatible with C
``long double`` but not necessarily with IEEE 754 quadruple-precision.
""")
add_newdoc_for_scalar_type('csingle', ['singlecomplex'],
"""
Complex number type composed of two single-precision floating-point
numbers.
""")
add_newdoc_for_scalar_type('cdouble', ['cfloat', 'complex_'],
"""
Complex number type composed of two double-precision floating-point
numbers, compatible with Python `complex`.
""")
add_newdoc_for_scalar_type('clongdouble', ['clongfloat', 'longcomplex'],
"""
Complex number type composed of two extended-precision floating-point
numbers.
""")
add_newdoc_for_scalar_type('object_', [],
"""
Any Python object.
""")
|
gfyoung/numpy
|
numpy/core/_add_newdocs.py
|
Python
|
bsd-3-clause
| 211,533
|
[
"Brian"
] |
692f32c18410287e1bf650a1424dca659c267a68d17972b95ea2a4b653f83b58
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_version(*file_paths):
"""Retrieves the version from djcloudbridge/__init__.py"""
filename = os.path.join(os.path.dirname(__file__), *file_paths)
version_file = open(filename).read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError('Unable to find version string.')
version = get_version("djcloudbridge", "__init__.py")
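# For reference, get_version() above simply greps djcloudbridge/__init__.py for a
# line of the form ``__version__ = '1.2.3'`` (hypothetical value) and returns the
# quoted string, so a release only has to bump that single assignment.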
if sys.argv[-1] == 'publish':
try:
import wheel
print("Wheel version: ", wheel.__version__)
except ImportError:
print('Wheel library missing. Please run "pip install wheel"')
sys.exit()
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
sys.exit()
if sys.argv[-1] == 'tag':
print("Tagging the version on git:")
os.system("git tag -a %s -m 'version %s'" % (version, version))
os.system("git push --tags")
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
REQS_BASE = [
'django-polymorphic>=2.0.3', # for polymorphic model support
'django-rest-polymorphic==0.1.8', # drf plugin for polymorphic models, 0.1.9 came out in April 2020 and breaks CL
'django-nested-admin>=3.0.21', # for nested object editing in django admin
'djangorestframework>=3.0.0',
'coreapi>=2.2.3', # Provides REST API schema
'drf-nested-routers',
'django-rest-auth', # for user serialization
'django-fernet-fields', # for encryption of user cloud credentials
'sqlparse', # For migrations
'cloudbridge'
]
REQS_TEST = ([
'tox>=2.9.1',
'moto',
'coverage>=4.4.1',
'flake8>=3.4.1',
'flake8-import-order>=0.13'] + REQS_BASE
)
REQS_DEV = ([
'sphinx>=1.3.1',
'bumpversion>=0.5.3'] + REQS_TEST
)
setup(
name='djcloudbridge',
version=version,
description=("A ReSTful web api backed by cloudbridge for"
" interacting with cloud providers"),
long_description=readme + '\n\n' + history,
author='Galaxy and GVL Projects',
author_email='help@CloudVE.org',
url='https://github.com/cloudve/djcloudbridge',
packages=[
'djcloudbridge',
],
include_package_data=True,
install_requires=REQS_BASE,
extras_require={
'dev': REQS_DEV,
'test': REQS_TEST
},
license="MIT",
zip_safe=False,
keywords='djcloudbridge',
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Django',
'Framework :: Django :: 1.11',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
],
)
|
CloudVE/djcloudbridge
|
setup.py
|
Python
|
mit
| 3,003
|
[
"Galaxy"
] |
b15369ac6a40f763db755eb98474ebc3d099f265a4bae989edccd2696abef194
|
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bayesian NN using expectation propagation (Black-Box Alpha-Divergence).
See https://arxiv.org/abs/1511.03243 for details.
All formulas used in this implementation are derived in:
https://www.overleaf.com/12837696kwzjxkyhdytk#/49028744/.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
from absl import flags
from bandits.core.bayesian_nn import BayesianNN
FLAGS = flags.FLAGS
tfd = tf.contrib.distributions # update to: tensorflow_probability.distributions
def log_gaussian(x, mu, sigma, reduce_sum=True):
res = tfd.Normal(mu, sigma).log_prob(x)
if reduce_sum:
return tf.reduce_sum(res)
else:
return res
class BBAlphaDivergence(BayesianNN):
"""Implements an approximate Bayesian NN via Black-Box Alpha-Divergence."""
def __init__(self, hparams, name):
self.name = name
self.hparams = hparams
self.alpha = getattr(self.hparams, 'alpha', 1.0)
self.num_mc_nn_samples = getattr(self.hparams, 'num_mc_nn_samples', 10)
self.n_in = self.hparams.context_dim
self.n_out = self.hparams.num_actions
self.layers = self.hparams.layer_sizes
self.batch_size = self.hparams.batch_size
self.show_training = self.hparams.show_training
self.freq_summary = self.hparams.freq_summary
self.verbose = getattr(self.hparams, 'verbose', True)
self.cleared_times_trained = self.hparams.cleared_times_trained
self.initial_training_steps = self.hparams.initial_training_steps
self.training_schedule = np.linspace(self.initial_training_steps,
self.hparams.training_epochs,
self.cleared_times_trained)
self.times_trained = 0
self.initialize_model()
def initialize_model(self):
"""Builds and initialize the model."""
self.num_w = 0
self.num_b = 0
self.weights_m = {}
self.weights_std = {}
self.biases_m = {}
self.biases_std = {}
self.h_max_var = []
if self.hparams.use_sigma_exp_transform:
self.sigma_transform = tfd.bijectors.Exp()
else:
self.sigma_transform = tfd.bijectors.Softplus()
# Build the graph corresponding to the Bayesian NN instance.
self.graph = tf.Graph()
with self.graph.as_default():
self.sess = tf.Session()
self.x = tf.placeholder(shape=[None, self.n_in],
dtype=tf.float32, name='x')
self.y = tf.placeholder(shape=[None, self.n_out],
dtype=tf.float32, name='y')
self.weights = tf.placeholder(shape=[None, self.n_out],
dtype=tf.float32, name='w')
self.data_size = tf.placeholder(tf.float32, shape=(), name='data_size')
self.prior_variance = self.hparams.prior_variance
if self.prior_variance < 0:
# if not fixed, we learn the prior.
self.prior_variance = self.sigma_transform.forward(
self.build_mu_variable([1, 1]))
self.build_model()
self.sess.run(tf.global_variables_initializer())
def build_mu_variable(self, shape):
"""Returns a mean variable initialized as N(0, 0.05)."""
return tf.Variable(tf.random_normal(shape, 0.0, 0.05))
def build_sigma_variable(self, shape, init=-5.):
"""Returns a sigma variable initialized as N(init, 0.05)."""
# Initialize sigma to be very small initially to encourage MAP opt first
return tf.Variable(tf.random_normal(shape, init, 0.05))
def build_layer(self, input_x, shape, layer_id, activation_fn=tf.nn.relu):
"""Builds a layer with N(mean, std) for each weight, and samples from it."""
w_mu = self.build_mu_variable(shape)
w_sigma = self.sigma_transform.forward(self.build_sigma_variable(shape))
w_noise = tf.random_normal(shape)
w = w_mu + w_sigma * w_noise
b_mu = self.build_mu_variable([1, shape[1]])
b_sigma = self.sigma_transform.forward(
self.build_sigma_variable([1, shape[1]]))
b_noise = tf.random_normal([1, shape[1]])
b = b_mu + b_sigma * b_noise
# Create outputs
output_h = activation_fn(tf.matmul(input_x, w) + b)
# Store means and stds
self.weights_m[layer_id] = w_mu
self.weights_std[layer_id] = w_sigma
self.biases_m[layer_id] = b_mu
self.biases_std[layer_id] = b_sigma
return output_h
def sample_neural_network(self, activation_fn=tf.nn.relu):
"""Samples a nn from posterior, computes data log lk and log f factor."""
with self.graph.as_default():
log_f = 0
n = self.data_size
input_x = self.x
for layer_id in range(self.total_layers):
# load mean and std of each weight
w_mu = self.weights_m[layer_id]
w_sigma = self.weights_std[layer_id]
b_mu = self.biases_m[layer_id]
b_sigma = self.biases_std[layer_id]
# sample weights from Gaussian distribution
shape = w_mu.shape
w_noise = tf.random_normal(shape)
b_noise = tf.random_normal([1, int(shape[1])])
w = w_mu + w_sigma * w_noise
b = b_mu + b_sigma * b_noise
# compute contribution to log_f
t1 = w * w_mu / (n * w_sigma ** 2)
t2 = (0.5 * w ** 2 / n) * (1 / self.prior_variance - 1 / w_sigma ** 2)
log_f += tf.reduce_sum(t1 + t2)
t1 = b * b_mu / (n * b_sigma ** 2)
t2 = (0.5 * b ** 2 / n) * (1 / self.prior_variance - 1 / b_sigma ** 2)
log_f += tf.reduce_sum(t1 + t2)
if layer_id < self.total_layers - 1:
output_h = activation_fn(tf.matmul(input_x, w) + b)
else:
output_h = tf.matmul(input_x, w) + b
input_x = output_h
# compute log likelihood of the observed reward under the sampled nn
log_likelihood = log_gaussian(
self.y, output_h, self.noise_sigma, reduce_sum=False)
weighted_log_likelihood = tf.reduce_sum(log_likelihood * self.weights, -1)
return log_f, weighted_log_likelihood
def log_z_q(self):
"""Computes log-partition function of current posterior parameters."""
with self.graph.as_default():
log_z_q = 0
for layer_id in range(self.total_layers):
w_mu = self.weights_m[layer_id]
w_sigma = self.weights_std[layer_id]
b_mu = self.biases_m[layer_id]
b_sigma = self.biases_std[layer_id]
w_term = 0.5 * tf.reduce_sum(w_mu ** 2 / w_sigma ** 2)
w_term += 0.5 * tf.reduce_sum(tf.log(2 * np.pi) + 2 * tf.log(w_sigma))
b_term = 0.5 * tf.reduce_sum(b_mu ** 2 / b_sigma ** 2)
b_term += 0.5 * tf.reduce_sum(tf.log(2 * np.pi) + 2 * tf.log(b_sigma))
log_z_q += w_term + b_term
return log_z_q
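# In closed form, for the fully factorized Gaussian posterior q with means mu_i and
# standard deviations sigma_i, the quantity accumulated above is
#   log Z_q = sum_i [ mu_i**2 / (2 * sigma_i**2) + 0.5 * log(2 * pi * sigma_i**2) ],
# i.e. the log-normalizer of q written in its natural-parameter (exponential-family) form.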
def log_z_prior(self):
"""Computes log-partition function of the prior parameters."""
num_params = self.num_w + self.num_b
return num_params * 0.5 * tf.log(2 * np.pi * self.prior_variance)
def log_alpha_likelihood_ratio(self, activation_fn=tf.nn.relu):
# each nn sample returns (log f, log likelihoods)
nn_samples = [
self.sample_neural_network(activation_fn)
for _ in range(self.num_mc_nn_samples)
]
nn_log_f_samples = [elt[0] for elt in nn_samples]
nn_log_lk_samples = [elt[1] for elt in nn_samples]
# we stack the (log f, log likelihoods) from the k nn samples
nn_log_f_stack = tf.stack(nn_log_f_samples) # k x 1
nn_log_lk_stack = tf.stack(nn_log_lk_samples) # k x N
nn_f_tile = tf.tile(nn_log_f_stack, [self.batch_size])
nn_f_tile = tf.reshape(nn_f_tile,
[self.num_mc_nn_samples, self.batch_size])
# now both the log f and log likelihood terms have shape: k x N
# apply formula in https://www.overleaf.com/12837696kwzjxkyhdytk#/49028744/
nn_log_ratio = nn_log_lk_stack - nn_f_tile
nn_log_ratio = self.alpha * tf.transpose(nn_log_ratio)
logsumexp_value = tf.reduce_logsumexp(nn_log_ratio, -1)
log_k_scalar = tf.log(tf.cast(self.num_mc_nn_samples, tf.float32))
log_k = log_k_scalar * tf.ones([self.batch_size])
return tf.reduce_sum(logsumexp_value - log_k, -1)
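# The value returned above is the Monte-Carlo estimate of the black-box alpha term
#   sum_n [ logsumexp_k( alpha * (log p(y_n | w_k) - log f(w_k)) ) - log K ],
# with K = num_mc_nn_samples posterior samples w_k, matching the alpha-divergence
# objective referenced in the module docstring.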
def build_model(self, activation_fn=tf.nn.relu):
"""Defines the actual NN model with fully connected layers.
Args:
activation_fn: Activation function for the neural network.
The loss is computed for partial feedback settings (bandits), so only
the observed outcome is backpropagated (see weighted loss).
Selects the optimizer and, finally, it also initializes the graph.
"""
print('Initializing model {}.'.format(self.name))
# Build terms for the noise sigma estimation for each action.
noise_sigma_mu = (self.build_mu_variable([1, self.n_out])
+ self.sigma_transform.inverse(self.hparams.noise_sigma))
noise_sigma_sigma = self.sigma_transform.forward(
self.build_sigma_variable([1, self.n_out]))
pre_noise_sigma = noise_sigma_mu + tf.random_normal(
[1, self.n_out]) * noise_sigma_sigma
self.noise_sigma = self.sigma_transform.forward(pre_noise_sigma)
# Build network
input_x = self.x
n_in = self.n_in
self.total_layers = len(self.layers) + 1
if self.layers[0] == 0:
self.total_layers = 1
for l_number, n_nodes in enumerate(self.layers):
if n_nodes > 0:
h = self.build_layer(input_x, [n_in, n_nodes], l_number)
input_x = h
n_in = n_nodes
self.num_w += n_in * n_nodes
self.num_b += n_nodes
self.y_pred = self.build_layer(input_x, [n_in, self.n_out],
self.total_layers - 1,
activation_fn=lambda x: x)
# Compute energy function based on sampled nn's
log_coeff = self.data_size / (self.batch_size * self.alpha)
log_ratio = log_coeff * self.log_alpha_likelihood_ratio(activation_fn)
logzprior = self.log_z_prior()
logzq = self.log_z_q()
energy = logzprior - logzq - log_ratio
self.loss = energy
self.global_step = tf.train.get_or_create_global_step()
self.train_op = tf.train.AdamOptimizer(self.hparams.initial_lr).minimize(
self.loss, global_step=self.global_step)
# Useful for debugging
sq_loss = tf.squared_difference(self.y_pred, self.y)
weighted_sq_loss = self.weights * sq_loss
self.cost = tf.reduce_sum(weighted_sq_loss) / self.batch_size
# Create tensorboard metrics
self.create_summaries()
self.summary_writer = tf.summary.FileWriter('{}/graph_{}'.format(
FLAGS.logdir, self.name), self.sess.graph)
def create_summaries(self):
tf.summary.scalar('loss', self.loss)
tf.summary.scalar('cost', self.cost)
self.summary_op = tf.summary.merge_all()
def assign_lr(self):
"""Resets the learning rate in dynamic schedules for subsequent trainings.
In bandits settings, we do expand our dataset over time. Then, we need to
re-train the network with the new data. Those algorithms that do not keep
the step constant, can reset it at the start of each training process.
"""
decay_steps = 1
if self.hparams.activate_decay:
current_gs = self.sess.run(self.global_step)
with self.graph.as_default():
self.lr = tf.train.inverse_time_decay(self.hparams.initial_lr,
self.global_step - current_gs,
decay_steps,
self.hparams.lr_decay_rate)
def train(self, data, num_steps):
"""Trains the BNN for num_steps, using the data in 'data'.
Args:
data: ContextualDataset object that provides the data.
num_steps: Number of minibatches to train the network for.
"""
if self.times_trained < self.cleared_times_trained:
num_steps = int(self.training_schedule[self.times_trained])
self.times_trained += 1
if self.verbose:
print('Training {} for {} steps...'.format(self.name, num_steps))
with self.graph.as_default():
for step in range(num_steps):
x, y, w = data.get_batch_with_weights(self.hparams.batch_size)
_, summary, global_step, loss = self.sess.run(
[self.train_op, self.summary_op, self.global_step, self.loss],
feed_dict={self.x: x, self.y: y, self.weights: w,
self.data_size: data.num_points()})
weights_l = self.sess.run(self.weights_std[0])
self.h_max_var.append(np.max(weights_l))
if step % self.freq_summary == 0:
if self.show_training:
print('step: {}, loss: {}'.format(step, loss))
sys.stdout.flush()
self.summary_writer.add_summary(summary, global_step)
|
cshallue/models
|
research/deep_contextual_bandits/bandits/algorithms/bb_alpha_divergence_model.py
|
Python
|
apache-2.0
| 13,407
|
[
"Gaussian"
] |
88ed77e34ea9e2c844dda9f4469e3f0d97024249a9e7f250ceb64009cbfa96b4
|
from __future__ import print_function
import numpy as np
import cv2
from PIL import Image  # used by savePosImage()/saveNegImage() below
#import tensorflow as tf
import sys
sys.path.append('./agent')
sys.path.append('./deep_feedback_learning')
from agent.doom_simulator import DoomSimulator
#from agent.agent import Agent
from deep_feedback_learning import DeepFeedbackLearning
import threading
from matplotlib import pyplot as plt
width = 320
widthIn = 320
height = 240
heightIn = 240
nFiltersInput = 3
nFiltersHidden = 3
nHidden = [16, 10, 10]
# nFiltersHidden = 0 means that the layer is linear without filters
minT = 3
maxT = 15
deepBP = DeepFeedbackLearning(width * height, [nHidden[0], nHidden[1], nHidden[2]], 1, nFiltersInput, nFiltersHidden, minT, maxT)
# init the weights
deepBP.getLayer(0).setConvolution(width, height)
deepBP.initWeights(1E-6, 1)
deepBP.setBias(1)
deepBP.setAlgorithm(DeepFeedbackLearning.ico)
deepBP.setLearningRate(1E-4)
deepBP.seedRandom(89)
deepBP.setUseDerivative(1)
preprocess_input_images = lambda x: x / 255. - 0.5
sharpen = np.array((
[0, 1, 0],
[1, 4, 1],
[0, 1, 0]), dtype="int")
edge = np.array((
[0, 1, 0],
[1, -4, 1],
[0, 1, 0]), dtype="int")
plt.ion()
plt.show()
ln = False
def getWeights(neuron):
n_neurons = deepBP.getLayer(0).getNneurons()
n_inputs = deepBP.getLayer(0).getNeuron(neuron).getNinputs()
weights = np.zeros(n_inputs)
for i in range(n_inputs):
if deepBP.getLayer(0).getNeuron(neuron).getMask(i):
weights[i] = deepBP.getLayer(0).getNeuron(neuron).getAvgWeight(i)
else:
weights[i] = np.nan
return weights.reshape(height,width)
def plotWeights():
global ln
while True:
if ln:
ln.remove()
w1 = getWeights(0)
for i in range(1,deepBP.getLayer(0).getNneurons()):
w2 = getWeights(i)
w1 = np.where(np.isnan(w2),w1,w2)
ln = plt.imshow(w1,cmap='gray')
plt.draw()
print("*** UPDATE PLOT ***")
plt.pause(10)
def getColourImbalance(img, colour):
if(img.shape[0]) != 3:
print("Error in getColourImbalance: wrong number of image channels: ", img.shape)
return 0.
width = int(img.shape[2]/2)
height = int(img.shape[1]/2)
print ("width: ", width, "height", height)
avgLeft = np.average(img[:,:,:width], axis=1)
avgLeft = np.average(avgLeft, axis=1)
# avgLeft = np.dot(avgLeft, colour)
avgRight = np.average(img[:,:,width:], axis=1)
avgRight = np.average(avgRight, axis=1)
# avgRight = np.dot(avgRight, colour)
avgTop = np.average(img[:, :height, :], axis=1)
avgTop = np.average(avgTop, axis=1)
# avgTop = np.dot(avgTop, colour)
avgBottom = np.average(img[:, height:, :], axis=1)
avgBottom = np.average(avgBottom, axis=1)
# avgBottom = np.dot(avgBottom, colour)
print("avgLeft: ", avgLeft, " avgRight: ", avgRight, "avgTop", avgTop, "avgBottom", avgBottom)
return 1.
def getMaxColourPos(img, colour):
img = np.array(img, dtype='float64')
width = int(img.shape[1])
height = int(img.shape[0])
# img[:,10,10] = [0,0,255]
diff = np.ones(img.shape)
diff[:,:,0] = colour[0]
diff[:,:,1] = colour[1]
diff[:,:,2] = colour[2]
diff = np.absolute(np.add(diff, (-1*img)))
cv2.imwrite("/home/paul/tmp/Images/Positive/diff-" + ".jpg", diff)
diff = np.sum(diff, axis=2)
cv2.imwrite("/home/paul/tmp/Images/Positive/diffGrey-" + ".jpg", diff)
indx = np.argmin(diff)
indx0 = int(indx / width)
indx1 = indx % width
pts = np.asarray(np.where((np.mean(diff) - diff) > 150))
if (pts.shape[1]>0):
bottomLeft = np.array([np.amin(pts[1]), np.amin(pts[0])])
topRight = np.array([np.amax(pts[1]), np.amax(pts[0])])
else:
bottomLeft = []
topRight = []
print("COLOUR: ", [indx1, indx0])
# cv2.imwrite("/home/paul/tmp/Images/Positive/rect-" + ".jpg", img)
# print ("Colour diff: ", np.mean(diff) - diff[indx0,indx1])
return np.array([indx1, indx0]), bottomLeft, topRight, np.mean(diff) - diff[indx0,indx1]
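# Minimal usage sketch (hypothetical call): given an image array and a 3-element
# target colour, the function returns the best-matching pixel position, a bounding
# box around strong matches, and a match-strength score:
#   pos, bottom_left, top_right, strength = getMaxColourPos(frame, [255, 0, 0])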
def savePosImage(curr_step, centre, x1, y1, x2, y2, _img, myFile, width, height):
print ("img shape: ", img2.shape)
myFile.write("/home/paul/tmp/Images/" + str(curr_step) + ".jpg"
+ " 1"
+ " " + str(x1) + " " + str(y1)
+ " " + str(x2) + " " + str(y2) + "\n")
img = np.zeros(_img.shape,dtype=np.uint8)
outImage = Image.fromarray(img)
outImage.save("/home/paul/tmp/Images/Positive/" + str(curr_step) + ".jpg")
def saveNegImage(curr_step, img2, myFile, width, height):
myFile.write("/home/paul/tmp/Images/" + str(curr_step) + ".jpg\n")
# img2 = np.rollaxis(img2, 0, 3)
img = Image.fromarray(img2)
img.save("/home/paul/tmp/Images/Negative/" + str(curr_step) + ".jpg")
def main():
## Simulator
simulator_args = {}
simulator_args['config'] = 'config/config.cfg'
simulator_args['resolution'] = (widthIn,heightIn)
simulator_args['frame_skip'] = 1
simulator_args['color_mode'] = 'RGB24'
simulator_args['game_args'] = "+name ICO +colorset 7"
## Agent
agent_args = {}
# preprocessing
preprocess_input_images = lambda x: x / 255. - 0.5
agent_args['preprocess_input_images'] = lambda x: x / 255. - 0.5
agent_args['preprocess_input_measurements'] = lambda x: x / 100. - 0.5
agent_args['num_future_steps'] = 6
pred_scale_coeffs = np.expand_dims(
(np.expand_dims(np.array([8., 40., 1.]), 1) * np.ones((1, agent_args['num_future_steps']))).flatten(), 0)
agent_args['meas_for_net_init'] = range(3)
agent_args['meas_for_manual_init'] = range(3, 16)
agent_args['resolution'] = (width,height)
# just use grayscale for nnet inputs
agent_args['num_channels'] = 1
# net parameters
agent_args['conv_params'] = np.array([(16, 5, 4), (32, 3, 2), (64, 3, 2), (128, 3, 2)],
dtype=[('out_channels', int), ('kernel', int), ('stride', int)])
agent_args['fc_img_params'] = np.array([(128,)], dtype=[('out_dims', int)])
agent_args['fc_meas_params'] = np.array([(128,), (128,), (128,)], dtype=[('out_dims', int)])
agent_args['fc_joint_params'] = np.array([(256,), (256,), (-1,)], dtype=[('out_dims', int)])
agent_args['target_dim'] = agent_args['num_future_steps'] * len(agent_args['meas_for_net_init'])
agent_args['n_actions'] = 7
# experiment arguments
agent_args['test_objective_params'] = (np.array([5, 11, 17]), np.array([1., 1., 1.]))
agent_args['history_length'] = 3
agent_args['history_length_ico'] = 3
historyLen = agent_args['history_length']
print ("HistoryLen: ", historyLen)
print('starting simulator')
simulator = DoomSimulator(simulator_args)
num_channels = simulator.num_channels
print('started simulator')
agent_args['state_imgs_shape'] = (
historyLen * num_channels, simulator.resolution[1], simulator.resolution[0])
agent_args['n_ffnet_inputs'] = 2*(agent_args['resolution'][0]*agent_args['resolution'][1])
agent_args['n_ffnet_hidden'] = np.array([50,5])
agent_args['n_ffnet_outputs'] = 1
agent_args['n_ffnet_act'] = 7
agent_args['n_ffnet_meas'] = simulator.num_meas
agent_args['learning_rate'] = 1E-4
if 'meas_for_net_init' in agent_args:
agent_args['meas_for_net'] = []
for ns in range(historyLen):
agent_args['meas_for_net'] += [i + simulator.num_meas * ns for i in agent_args['meas_for_net_init']]
agent_args['meas_for_net'] = np.array(agent_args['meas_for_net'])
else:
agent_args['meas_for_net'] = np.arange(historyLen * simulator.num_meas)
if len(agent_args['meas_for_manual_init']) > 0:
agent_args['meas_for_manual'] = np.array([i + simulator.num_meas * (historyLen - 1) for i in
agent_args[
'meas_for_manual_init']]) # current timestep is the last in the stack
else:
agent_args['meas_for_manual'] = []
agent_args['state_meas_shape'] = (len(agent_args['meas_for_net']),)
# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
# sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
# agent = Agent(sess, agent_args)
# agent.load('/home/paul/Dev/GameAI/vizdoom_cig2017/icolearner/ICO1/checkpoints/ICO-8600')
# print("model loaded..")
# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
# sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
img_buffer = np.zeros(
(historyLen, simulator.resolution[1], simulator.resolution[0], num_channels), dtype='uint8')
meas_buffer = np.zeros((historyLen, simulator.num_meas))
act_buffer = np.zeros((historyLen, 7))
act_buffer_ico = np.zeros((agent_args['history_length_ico'], 7))
curr_step = 0
old_step = -1
term = False
print ("state_meas_shape: ", meas_buffer.shape, " == ", agent_args['state_meas_shape'])
print ("act_buffer_shape: ", act_buffer.shape)
# ag = Agent(agent_args)
diff_y = 0
diff_x = 0
diff_z = 0
diff_theta = 0
iter = 1
epoch = 200
radialFlowLeft = 30.
radialFlowRight = 30.
radialFlowInertia = 0.4
radialGain = 4.
rotationGain = 50.
errorThresh = 10.
updatePtsFreq = 50
skipImage = 1
skipImageICO = 5
reflexGain = 0.01
oldHealth = 0.
# create masks for left and right visual fields - note that these only cover the upper half of the image
# this is to help prevent the tracking getting confused by the floor pattern
half_height = round(height/2)
half_width = round(width/2)
maskLeft = np.zeros([height, width], np.uint8)
maskLeft[half_height:, :half_width] = 1.
maskRight = np.zeros([height, width], np.uint8)
maskRight[half_height:, half_width:] = 1.
    # for ICO, the error signal has the same dimensionality as the first hidden layer
netErr = np.zeros(nHidden[0])
# deepIcoEfference = Deep_ICO(simulator_args['resolution'][0] * simulator_args['resolution'][1] + 7, 10, 1)
nh = np.asarray([36,36])
# deepIcoEfference = Deep_ICO_Conv(1, [1], 1, Deep_ICO_Conv.conv)
# deepIcoEfference = Deep_ICO_Conv(simulator_args['resolution'][0] * simulator_args['resolution'][1] + 7,
# nh, simulator_args['resolution'][0] * simulator_args['resolution'][1], Deep_ICO_Conv.conv)
# deepIcoEfference.setLearningRate(0.01)
# deepIcoEfference.setAlgorithm(Deep_ICO.backprop)
# print ("Model type: ", "ff" if deepIcoEfference.getModelType() == 0 else "conv")
# deepIcoEfference.initWeights(1 / (np.sqrt(float(simulator_args['resolution'][0] * simulator_args['resolution'][1] + 7))))
# deepIcoEfference.initWeights(0.0)
outputImage = np.zeros(simulator_args['resolution'][0] * simulator_args['resolution'][1])
imageDiff = np.zeros(simulator_args['resolution'][0] * simulator_args['resolution'][1])
outputArray = np.zeros(1) #deepIcoEfference.getNoutputs())
lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
feature_params = dict(maxCorners=500, qualityLevel=0.03, minDistance=7, blockSize=7)
imgCentre = np.array([simulator_args['resolution'][0] / 2, simulator_args['resolution'][1] /2])
print ("Image centre: ", imgCentre)
simpleInputs1 = np.zeros((width, height))
simpleInputs2 = np.zeros((width, height))
input_buff = np.zeros((1,width*height))
target_buff = np.zeros((1,1))
t = threading.Thread(target=plotWeights)
t.start()
while not term:
if curr_step < historyLen:
curr_act = np.zeros(7).tolist()
img, meas, rwrd, term = simulator.step(curr_act)
print("Image: ", img.shape, " max: ", np.amax(img), " min: ", np.amin(img))
if curr_step == 0:
p0Left = cv2.goodFeaturesToTrack(img[:,:,0], mask=maskLeft, **feature_params)
p0Right = cv2.goodFeaturesToTrack(img[:,:,0], mask=maskRight, **feature_params)
img_buffer[curr_step % historyLen] = img
meas_buffer[curr_step % historyLen] = meas
act_buffer[curr_step % historyLen] = curr_act[:7]
else:
img1 = img_buffer[(curr_step-2) % historyLen,:,:,:]
img2 = img_buffer[(curr_step-1) % historyLen,:,:,:]
state = simulator._game.get_state()
stateImg = state.screen_buffer
greyImg1 = np.sum(img1, axis=0)
greyImg2 = cv2.resize(stateImg, (width,height))
greyImg2 = np.array(np.sum(greyImg2, axis=2)/3, dtype='uint8')
if(curr_step % updatePtsFreq == 0):
p0Left = cv2.goodFeaturesToTrack(img[:,:,0], mask=maskLeft, **feature_params)
p0Right = cv2.goodFeaturesToTrack(img[:,:,0], mask=maskRight, **feature_params)
p1Left, st, err = cv2.calcOpticalFlowPyrLK(img1[:,:,0], img2[:,:,0], p0Left, None, **lk_params)
p1Right, st, err = cv2.calcOpticalFlowPyrLK(img1[:,:,0], img2[:,:,0], p0Right, None, **lk_params)
flowLeft = (p1Left - p0Left)[:,0,:]
flowRight = (p1Right - p0Right)[:,0,:]
radialFlowTmpLeft = 0
radialFlowTmpRight = 0
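            # radial flow: project each tracked feature's optical flow onto its offset
            # from the image centre; positive values indicate expansion (forward motion)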
for i in range(0, len(p0Left)):
radialFlowTmpLeft += ((p0Left[i,0,:] - imgCentre)).dot(flowLeft[i,:]) / float(len(p0Left))
for i in range(0, len(p0Right)):
radialFlowTmpRight += ((p0Right[i,0,:] - imgCentre)).dot(flowRight[i,:]) / float(len(p0Right))
rotation = act_buffer[(curr_step - 1) % historyLen][6]
forward = act_buffer[(curr_step - 1) % historyLen][3]
# keep separate radial errors for left and right fields
radialFlowLeft = radialFlowLeft + radialFlowInertia * (radialFlowTmpLeft - radialFlowLeft)
radialFlowRight = radialFlowRight + radialFlowInertia * (radialFlowTmpRight - radialFlowRight)
expectFlowLeft = radialGain * forward + (rotationGain * rotation if rotation < 0. else 0.)
expectFlowRight = radialGain * forward - (rotationGain * rotation if rotation > 0. else 0.)
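            # flow error: forward-gated difference between expected and measured radial
            # flow, damped when the agent is rotating strongly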
flowErrorLeft = forward * (expectFlowLeft - radialFlowLeft) / (1. + rotationGain * np.abs(rotation))
flowErrorRight = forward * (expectFlowRight - radialFlowRight) / (1. + rotationGain * np.abs(rotation))
flowErrorLeft = flowErrorLeft if flowErrorLeft > 0. else 0.
flowErrorRight = flowErrorRight if flowErrorRight > 0. else 0.
icoSteer = 0.
if curr_step > 100:
health = meas[1]
# Don't run any networks when the player is dead!
if (health < 101. and health > 0.):
#print (curr_step)
                    # clamp each flow error at zero and scale by the reflex gain
                    # (assumed intent; the original one-line conditionals attached
                    # "/ reflexGain" only to the zero branch due to operator precedence)
                    icoInLeft = max(flowErrorLeft - errorThresh, 0.) / reflexGain
                    icoInRight = max(flowErrorRight - errorThresh, 0.) / reflexGain
                    icoInSteer = icoInRight - icoInLeft
centre1, bottomLeft1, topRight1, colourStrength1 = getMaxColourPos(img1, [255, 0, 0])
centre2, bottomLeft2, topRight2, colourStrength2 = getMaxColourPos(img2, [255, 0, 0])
colourSteer = centre2[0]
# get the setpoint in the -.9/+.9 range
simpleInputs1[:,:] = 0.1*np.random.rand(width, height)
simpleInputs2[:,:] = 0.1*np.random.rand(width, height)
sp = 1.8*(colourSteer - imgCentre[0]) / width
print ("ColourSteer: ", colourSteer, " ColourStrength: ", colourStrength2)
if(colourStrength2 > 150.):
#print ("ColourSteer: ", colourSteer, " ColourStrength: ", colourStrength)
#inputs[colourSteer,:] = colourStrength / 300.
simpleInputs2[bottomLeft2[0]:topRight2[0], bottomLeft2[1]:topRight2[1]] = 1.
#print(bottomLeft[0], bottomLeft[1], topRight[0], topRight[1], np.sum(inputs))
else:
colourStrength2 = 0.
sp =0
if (colourStrength1 > 150.):
simpleInputs1[bottomLeft1[0]:topRight1[0], bottomLeft1[1]:topRight1[1]] = 1.
netErr[:] = 0.
#deepBP.doStep(np.ndarray.flatten(inputs), np.ndarray.flatten(netErr))
#icoSteer = deepBP.getOutput(0)
#delta = sp - icoSteer
delta = 0.06 * colourStrength2 * (colourSteer - imgCentre[0])/width
#delta = 0.6 * max(min((icoInSteer), 5.), -5.)
#delta = 1. - icoSteer
#input_buff[0,:] = preprocess_input_images(np.ndarray.flatten(img2[2,:,:]))
#input_buff[0,:] = np.ndarray.flatten(inputs)
#input_buff[0,:] = np.concatenate([np.ndarray.flatten(greyImg1), np.ndarray.flatten(greyImg2)])
greyImg2 = cv2.filter2D(greyImg2, -1, edge)
input_buff[0,:] = np.ndarray.flatten(preprocess_input_images(greyImg2))
target_buff[0,0] = delta
if (False):
deepBP.setLearningRate(0.)
#net_output = np.ndarray.flatten(agent.test_ffnet(input_buff))[0]
#else:
#net_output = np.ndarray.flatten(agent.learn_ffnet(input_buff, target_buff))[0]
netErr[:] = delta
deepBP.doStep(preprocess_input_images(greyImg2.flatten()), netErr.flatten())
icoSteer = deepBP.getOutput(0)
print (" ** ", curr_step, icoSteer, " ", delta, " ", colourStrength2)
diff_theta = 0.6 * max(min((icoInSteer), 5.), -5.)
diff_theta = diff_theta + 0.01 * colourStrength2 * (colourSteer - imgCentre[0])/width
diff_theta = diff_theta + 10. * icoSteer
#diff_theta = diff_theta + 20. * net_output
curr_act = np.zeros(7).tolist()
curr_act[0] = 0
curr_act[1] = 0
curr_act[2] = 0
curr_act[3] = curr_act[3] + diff_z
curr_act[3] = 0.
curr_act[4] = 0
curr_act[5] = 0
curr_act[6] = curr_act[6] + diff_theta
oldHealth = health
img, meas, rwrd, term = simulator.step(curr_act)
if (not (meas is None)) and meas[0] > 30.:
meas[0] = 30.
if not term:
img_buffer[curr_step % historyLen] = img
meas_buffer[curr_step % historyLen] = meas
act_buffer[curr_step % historyLen] = curr_act[:7]
#if curr_step % epoch == 0:
# agent.save('/home/paul/Dev/GameAI/vizdoom_cig2017/icolearner/ICO1/checkpoints', curr_step)
# np.save('/home/paul/tmp/icoSteer-' + str(curr_step), icoSteer.weights)
# np.save('/home/paul/tmp/imageDiff-' + str(curr_step), imageDiff)
# np.save('/home/paul/tmp/icoDetect-' + str(curr_step), icoDetect.weights)
# icoSteer.saveInputs(curr_step)
curr_step += 1
simulator.close_game()
# ag.save('/home/paul/Dev/GameAI/vizdoom_cig2017/icolearner/ICO1/checkpoints/' + 'hack-' + str(iter))
if __name__ == '__main__':
main()
| nlholdem/icodoom | ICO1/run_agent.py | Python | gpl-3.0 | 19,933 | ["NEURON"] | fbdf068d35fd248c961b0bc8fd73e5beab49125cbf5304ff9b6a330088e5fd8c |
# $Id: dbexts.py,v 1.1 2005/10/05 20:19:27 eytanadar Exp $
"""
This script provides platform independence by wrapping Python
Database API 2.0 compatible drivers to allow seamless database
usage across implementations.
In order to use the C version, you need mxODBC and mxDateTime.
In order to use the Java version, you need zxJDBC.
>>> import dbexts
>>> d = dbexts.dbexts() # use the default db
>>> d.isql('select count(*) count from player')
count
-------
13569.0
1 row affected
>>> r = d.raw('select count(*) count from player')
>>> r
([('count', 3, 17, None, 15, 0, 1)], [(13569.0,)])
>>>
The configuration file, named dbexts.ini, uses the following format:
[default]
name=mysql
[jdbc]
name=mysql
url=jdbc:mysql://localhost/ziclix
user=
pwd=
driver=org.gjt.mm.mysql.Driver
datahandler=com.ziclix.python.sql.handler.MySQLDataHandler
[jdbc]
name=pg
url=jdbc:postgresql://localhost:5432/ziclix
user=bzimmer
pwd=
driver=org.postgresql.Driver
datahandler=com.ziclix.python.sql.handler.PostgresqlDataHandler
"""
import os, string, re
__author__ = "brian zimmer (bzimmer@ziclix.com)"
__version__ = "$Revision: 1.1 $"[11:-2]
__OS__ = os.name
choose = lambda bool, a, b: (bool and [a] or [b])[0]
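# pre-2.5 conditional idiom: choose(cond, a, b) == (a if cond else b); wrapping the
# operands in lists keeps it correct even when a itself is falsy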
def console(rows, headers=()):
"""Format the results into a list of strings (one for each row):
<header>
<headersep>
<row1>
<row2>
...
headers may be given as list of strings.
Columns are separated by colsep; the header is separated from
the result set by a line of headersep characters.
The function calls stringify to format the value data into a string.
    It defaults to calling str() and stripping leading and trailing whitespace.
- copied and modified from mxODBC
"""
# Check row entry lengths
output = []
headers = map(string.upper, list(map(lambda x: x or "", headers)))
collen = map(len,headers)
output.append(headers)
if rows and len(rows) > 0:
for row in rows:
row = map(lambda x: str(x), row)
for i in range(len(row)):
entry = row[i]
if collen[i] < len(entry):
collen[i] = len(entry)
output.append(row)
if len(output) == 1:
affected = "0 rows affected"
elif len(output) == 2:
affected = "1 row affected"
else:
affected = "%d rows affected" % (len(output) - 1)
# Format output
for i in range(len(output)):
row = output[i]
l = []
for j in range(len(row)):
l.append('%-*s' % (collen[j],row[j]))
output[i] = string.join(l, " | ")
# Insert header separator
totallen = len(output[0])
output[1:1] = ["-"*(totallen/len("-"))]
output.append("\n" + affected)
return output
def html(rows, headers=()):
output = []
output.append('<table class="results">')
output.append('<tr class="headers">')
headers = map(lambda x: '<td class="header">%s</td>' % (x.upper()), list(headers))
map(output.append, headers)
output.append('</tr>')
if rows and len(rows) > 0:
for row in rows:
output.append('<tr class="row">')
row = map(lambda x: '<td class="value">%s</td>' % (x), row)
map(output.append, row)
output.append('</tr>')
output.append('</table>')
return output
comments = lambda x: re.compile("{.*?}", re.S).sub("", x, 0)
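# strips {...} comment blocks from a SQL script before it is split into statements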
class ex_proxy:
"""Wraps mxODBC to provide proxy support for zxJDBC's additional parameters."""
def __init__(self, c):
self.c = c
def __getattr__(self, name):
if name == "execute":
return self.execute
elif name == "gettypeinfo":
return self.gettypeinfo
else:
return getattr(self.c, name)
def execute(self, sql, params=None, bindings=None, maxrows=None):
if params:
self.c.execute(sql, params)
else:
self.c.execute(sql)
def gettypeinfo(self, typeid=None):
if typeid:
self.c.gettypeinfo(typeid)
class executor:
"""Handles the insertion of values given dynamic data."""
def __init__(self, table, cols):
self.cols = cols
self.table = table
if self.cols:
self.sql = "insert into %s (%s) values (%s)" % (table, ",".join(self.cols), ",".join(("?",) * len(self.cols)))
else:
self.sql = "insert into %s values (%%s)" % (table)
def execute(self, db, rows, bindings):
assert rows and len(rows) > 0, "must have at least one row"
if self.cols:
sql = self.sql
else:
sql = self.sql % (",".join(("?",) * len(rows[0])))
db.raw(sql, rows, bindings)
def connect(dbname):
return dbexts(dbname)
def lookup(dbname):
return dbexts(jndiname=dbname)
class dbexts:
def __init__(self, dbname=None, cfg=None, formatter=console, autocommit=1, jndiname=None, out=None):
self.verbose = 1
self.results = None
self.headers = None
self.datahandler = None
self.autocommit = autocommit
self.formatter = formatter
self.out = out
self.lastrowid = None
self.updatecount = None
if not jndiname:
if cfg == None:
fn = os.path.join(os.path.split(__file__)[0], "dbexts.ini")
if not os.path.exists(fn):
fn = os.path.join(os.environ['HOME'], ".dbexts")
self.dbs = IniParser(fn)
elif isinstance(cfg, IniParser):
self.dbs = cfg
else:
self.dbs = IniParser(cfg)
if dbname == None: dbname = self.dbs[("default", "name")]
if __OS__ == 'java':
from com.ziclix.python.sql import zxJDBC
database = zxJDBC
if not jndiname:
t = self.dbs[("jdbc", dbname)]
self.dburl, dbuser, dbpwd, jdbcdriver = t['url'], t['user'], t['pwd'], t['driver']
if t.has_key("datahandler"):
try:
datahandlerclass = string.split(t['datahandler'], ".")[-1]
self.datahandler = __import__(t['datahandler'], globals(), locals(), datahandlerclass)
except:
pass
keys = filter(lambda x: x not in ['url', 'user', 'pwd', 'driver', 'datahandler', 'name'], t.keys())
props = {}
for a in keys:
props[a] = t[a]
self.db = apply(database.connect, (self.dburl, dbuser, dbpwd, jdbcdriver), props)
else:
self.db = database.lookup(jndiname)
self.db.autocommit = 0
elif __OS__ == 'nt':
for modname in ["mx.ODBC.Windows", "ODBC.Windows"]:
try:
database = __import__(modname, globals(), locals(), "Windows")
break
except:
continue
else:
raise ImportError("unable to find appropriate mxODBC module")
t = self.dbs[("odbc", dbname)]
self.dburl, dbuser, dbpwd = t['url'], t['user'], t['pwd']
self.db = database.Connect(self.dburl, dbuser, dbpwd, clear_auto_commit=1)
for a in database.sqltype.keys():
setattr(self, database.sqltype[a], a)
del database
def __str__(self):
return self.dburl
def __repr__(self):
return self.dburl
def __getattr__(self, name):
if "cfg" == name:
return self.dbs.cfg
def close(self):
""" close the connection to the database """
self.db.close()
def begin(self):
""" reset ivars and return a new cursor, possibly binding an auxiliary datahandler """
self.headers, self.results = None, None
c = self.db.cursor()
if __OS__ == 'java':
if self.datahandler: c.datahandler = self.datahandler(c.datahandler)
else:
c = ex_proxy(c)
return c
def commit(self, cursor=None):
""" commit the cursor and create the result set """
if cursor and cursor.description:
self.headers = cursor.description
self.results = cursor.fetchall()
if hasattr(cursor, "nextset"):
s = cursor.nextset()
while s:
f = cursor.fetchall()
if f: self.results = choose(self.results is None, [], self.results) + f
s = cursor.nextset()
if hasattr(cursor, "lastrowid"): self.lastrowid = cursor.lastrowid
if hasattr(cursor, "updatecount"): self.updatecount = cursor.updatecount
if self.autocommit or cursor is None: self.db.commit()
if cursor: cursor.close()
def rollback(self):
""" rollback the cursor """
self.db.rollback()
def display(self):
""" using the formatter, display the results """
if self.formatter and self.verbose > 0:
res = self.results
if res:
print >> self.out, ""
for a in self.formatter(res, map(lambda x: x[0], self.headers)):
print >> self.out, a
print >> self.out, ""
def __execute__(self, sql, params=None, bindings=None, maxrows=None):
""" the primary execution method """
cur = self.begin()
try:
if bindings:
cur.execute(sql, params, bindings, maxrows=maxrows)
elif params:
cur.execute(sql, params, maxrows=maxrows)
else:
cur.execute(sql, maxrows=maxrows)
finally:
self.commit(cur)
def isql(self, sql, params=None, bindings=None, maxrows=None):
""" execute and display the sql """
self.raw(sql, params, bindings, maxrows=maxrows)
self.display()
def raw(self, sql, params=None, bindings=None, delim=None, comments=comments, maxrows=None):
""" execute the sql and return a tuple of (headers, results) """
if delim:
headers = []
results = []
if comments: sql = comments(sql)
statements = filter(lambda x: len(x) > 0, map(string.strip, string.split(sql, delim)))
for a in statements:
self.__execute__(a, params, bindings, maxrows=maxrows)
headers.append(self.headers)
results.append(self.results)
self.headers = headers
self.results = results
else:
self.__execute__(sql, params, bindings, maxrows=maxrows)
return (self.headers, self.results)
def callproc(self, procname, params=None, bindings=None, maxrows=None):
""" execute a stored procedure """
cur = self.begin()
try:
cur.callproc(procname, params=params, bindings=bindings, maxrows=maxrows)
finally:
self.commit(cur)
self.display()
def pk(self, table, owner=None, schema=None):
""" display the table's primary keys """
cur = self.begin()
cur.primarykeys(schema, owner, table)
self.commit(cur)
self.display()
def fk(self, primary_table=None, foreign_table=None, owner=None, schema=None):
""" display the table's foreign keys """
cur = self.begin()
if primary_table and foreign_table:
cur.foreignkeys(schema, owner, primary_table, schema, owner, foreign_table)
elif primary_table:
cur.foreignkeys(schema, owner, primary_table, schema, owner, None)
elif foreign_table:
cur.foreignkeys(schema, owner, None, schema, owner, foreign_table)
self.commit(cur)
self.display()
def table(self, table=None, types=("TABLE",), owner=None, schema=None):
"""If no table argument, displays a list of all tables. If a table argument,
displays the columns of the given table."""
cur = self.begin()
if table:
cur.columns(schema, owner, table, None)
else:
cur.tables(schema, owner, None, types)
self.commit(cur)
self.display()
def proc(self, proc=None, owner=None, schema=None):
"""If no proc argument, displays a list of all procedures. If a proc argument,
displays the parameters of the given procedure."""
cur = self.begin()
if proc:
cur.procedurecolumns(schema, owner, proc, None)
else:
cur.procedures(schema, owner, None)
self.commit(cur)
self.display()
def stat(self, table, qualifier=None, owner=None, unique=0, accuracy=0):
""" display the table's indicies """
cur = self.begin()
cur.statistics(qualifier, owner, table, unique, accuracy)
self.commit(cur)
self.display()
def typeinfo(self, sqltype=None):
""" display the types available for the database """
cur = self.begin()
cur.gettypeinfo(sqltype)
self.commit(cur)
self.display()
def tabletypeinfo(self):
""" display the table types available for the database """
cur = self.begin()
cur.gettabletypeinfo()
self.commit(cur)
self.display()
def schema(self, table, full=0, sort=1, owner=None):
"""Displays a Schema object for the table. If full is true, then generates
references to the table in addition to the standard fields. If sort is true,
sort all the items in the schema, else leave them in db dependent order."""
print >> self.out, str(Schema(self, table, owner, full, sort))
def bulkcopy(self, dst, table, include=[], exclude=[], autobatch=0, executor=executor):
"""Returns a Bulkcopy object using the given table."""
if type(dst) == type(""):
dst = dbexts(dst, cfg=self.dbs)
bcp = Bulkcopy(dst, table, include=include, exclude=exclude, autobatch=autobatch, executor=executor)
return bcp
def bcp(self, src, table, where='(1=1)', params=[], include=[], exclude=[], autobatch=0, executor=executor):
"""Bulkcopy of rows from a src database to the current database for a given table and where clause."""
if type(src) == type(""):
src = dbexts(src, cfg=self.dbs)
bcp = self.bulkcopy(self, table, include, exclude, autobatch, executor)
num = bcp.transfer(src, where, params)
return num
def unload(self, filename, sql, delimiter=",", includeheaders=1):
""" Unloads the delimited results of the query to the file specified, optionally including headers. """
u = Unload(self, filename, delimiter, includeheaders)
u.unload(sql)
class Bulkcopy:
"""The idea for a bcp class came from http://object-craft.com.au/projects/sybase"""
def __init__(self, dst, table, include=[], exclude=[], autobatch=0, executor=executor):
self.dst = dst
self.table = table
self.total = 0
self.rows = []
self.autobatch = autobatch
self.bindings = {}
include = map(lambda x: string.lower(x), include)
exclude = map(lambda x: string.lower(x), exclude)
_verbose = self.dst.verbose
self.dst.verbose = 0
try:
self.dst.table(self.table)
if self.dst.results:
colmap = {}
for a in self.dst.results:
colmap[a[3].lower()] = a[4]
cols = self.__filter__(colmap.keys(), include, exclude)
for a in zip(range(len(cols)), cols):
self.bindings[a[0]] = colmap[a[1]]
colmap = None
else:
cols = self.__filter__(include, include, exclude)
finally:
self.dst.verbose = _verbose
self.executor = executor(table, cols)
def __str__(self):
return "[%s].[%s]" % (self.dst, self.table)
def __repr__(self):
return "[%s].[%s]" % (self.dst, self.table)
def __getattr__(self, name):
if name == 'columns':
return self.executor.cols
def __filter__(self, values, include, exclude):
cols = map(string.lower, values)
if exclude:
cols = filter(lambda x, ex=exclude: x not in ex, cols)
if include:
cols = filter(lambda x, inc=include: x in inc, cols)
return cols
def format(self, column, type):
self.bindings[column] = type
def done(self):
if len(self.rows) > 0:
return self.batch()
return 0
def batch(self):
self.executor.execute(self.dst, self.rows, self.bindings)
cnt = len(self.rows)
self.total += cnt
self.rows = []
return cnt
def rowxfer(self, line):
self.rows.append(line)
if self.autobatch: self.batch()
def transfer(self, src, where="(1=1)", params=[]):
sql = "select %s from %s where %s" % (string.join(self.columns, ", "), self.table, where)
h, d = src.raw(sql, params)
if d:
map(self.rowxfer, d)
return self.done()
return 0
class Unload:
"""Unloads a sql statement to a file with optional formatting of each value."""
def __init__(self, db, filename, delimiter=",", includeheaders=1):
self.db = db
self.filename = filename
self.delimiter = delimiter
self.includeheaders = includeheaders
self.formatters = {}
def format(self, o):
if not o:
return ""
o = str(o)
if o.find(",") != -1:
o = "\"\"%s\"\"" % (o)
return o
def unload(self, sql, mode="w"):
headers, results = self.db.raw(sql)
w = open(self.filename, mode)
if self.includeheaders:
w.write("%s\n" % (string.join(map(lambda x: x[0], headers), self.delimiter)))
if results:
for a in results:
w.write("%s\n" % (string.join(map(self.format, a), self.delimiter)))
w.flush()
w.close()
class Schema:
"""Produces a Schema object which represents the database schema for a table"""
def __init__(self, db, table, owner=None, full=0, sort=1):
self.db = db
self.table = table
self.owner = owner
self.full = full
self.sort = sort
_verbose = self.db.verbose
self.db.verbose = 0
try:
if table: self.computeschema()
finally:
self.db.verbose = _verbose
def computeschema(self):
self.db.table(self.table, owner=self.owner)
self.columns = []
# (column name, type_name, size, nullable)
if self.db.results:
self.columns = map(lambda x: (x[3], x[5], x[6], x[10]), self.db.results)
if self.sort: self.columns.sort(lambda x, y: cmp(x[0], y[0]))
self.db.fk(None, self.table)
# (pk table name, pk column name, fk column name, fk name, pk name)
self.imported = []
if self.db.results:
self.imported = map(lambda x: (x[2], x[3], x[7], x[11], x[12]), self.db.results)
if self.sort: self.imported.sort(lambda x, y: cmp(x[2], y[2]))
self.exported = []
if self.full:
self.db.fk(self.table, None)
# (pk column name, fk table name, fk column name, fk name, pk name)
if self.db.results:
self.exported = map(lambda x: (x[3], x[6], x[7], x[11], x[12]), self.db.results)
if self.sort: self.exported.sort(lambda x, y: cmp(x[1], y[1]))
self.db.pk(self.table)
self.primarykeys = []
if self.db.results:
# (column name, key_seq, pk name)
self.primarykeys = map(lambda x: (x[3], x[4], x[5]), self.db.results)
if self.sort: self.primarykeys.sort(lambda x, y: cmp(x[1], y[1]))
self.db.stat(self.table)
# (non-unique, name, type, pos, column name, asc)
self.indices = []
if self.db.results:
idxdict = {}
# mxODBC returns a row of None's, so filter it out
idx = map(lambda x: (x[3], string.strip(x[5]), x[6], x[7], x[8]), filter(lambda x: x[5], self.db.results))
def cckmp(x, y):
c = cmp(x[1], y[1])
if c == 0: c = cmp(x[3], y[3])
return c
            # sort this regardless; this gets the indices lined up
idx.sort(cckmp)
for a in idx:
if not idxdict.has_key(a[1]):
idxdict[a[1]] = []
idxdict[a[1]].append(a)
self.indices = idxdict.values()
if self.sort: self.indices.sort(lambda x, y: cmp(x[0][1], y[0][1]))
def __str__(self):
d = []
d.append("Table")
d.append(" " + self.table)
d.append("\nPrimary Keys")
for a in self.primarykeys:
d.append(" %s {%s}" % (a[0], a[2]))
d.append("\nImported (Foreign) Keys")
for a in self.imported:
d.append(" %s (%s.%s) {%s}" % (a[2], a[0], a[1], a[3]))
if self.full:
d.append("\nExported (Referenced) Keys")
for a in self.exported:
d.append(" %s (%s.%s) {%s}" % (a[0], a[1], a[2], a[3]))
d.append("\nColumns")
for a in self.columns:
nullable = choose(a[3], "nullable", "non-nullable")
d.append(" %-20s %s(%s), %s" % (a[0], a[1], a[2], nullable))
d.append("\nIndices")
for a in self.indices:
unique = choose(a[0][0], "non-unique", "unique")
cname = string.join(map(lambda x: x[4], a), ", ")
d.append(" %s index {%s} on (%s)" % (unique, a[0][1], cname))
return string.join(d, "\n")
class IniParser:
def __init__(self, cfg, key='name'):
self.key = key
self.records = {}
self.ctypeRE = re.compile("\[(jdbc|odbc|default)\]")
self.entryRE = re.compile("([a-zA-Z]+)[ \t]*=[ \t]*(.*)")
self.cfg = cfg
self.parse()
def parse(self):
fp = open(self.cfg, "r")
data = fp.readlines()
fp.close()
lines = filter(lambda x: len(x) > 0 and x[0] not in ['#', ';'], map(string.strip, data))
current = None
for i in range(len(lines)):
line = lines[i]
g = self.ctypeRE.match(line)
if g: # a section header
current = {}
if not self.records.has_key(g.group(1)):
self.records[g.group(1)] = []
self.records[g.group(1)].append(current)
else:
g = self.entryRE.match(line)
if g:
current[g.group(1)] = g.group(2)
def __getitem__(self, (ctype, skey)):
if skey == self.key: return self.records[ctype][0][skey]
t = filter(lambda x, p=self.key, s=skey: x[p] == s, self.records[ctype])
if not t or len(t) > 1:
raise KeyError, "invalid key ('%s', '%s')" % (ctype, skey)
return t[0]
def random_table_name(prefix, num_chars):
import random
d = [prefix, '_']
i = 0
while i < num_chars:
d.append(chr(int(100 * random.random()) % 26 + ord('A')))
i += 1
return string.join(d, "")
class ResultSetRow:
def __init__(self, rs, row):
self.row = row
self.rs = rs
def __getitem__(self, i):
if type(i) == type(""):
i = self.rs.index(i)
return self.row[i]
def __getslice__(self, i, j):
if type(i) == type(""): i = self.rs.index(i)
if type(j) == type(""): j = self.rs.index(j)
return self.row[i:j]
def __len__(self):
return len(self.row)
def __repr__(self):
return str(self.row)
class ResultSet:
def __init__(self, headers, results=[]):
self.headers = map(lambda x: x.upper(), headers)
self.results = results
def index(self, i):
return self.headers.index(string.upper(i))
def __getitem__(self, i):
return ResultSetRow(self, self.results[i])
def __getslice__(self, i, j):
return map(lambda x, rs=self: ResultSetRow(rs, x), self.results[i:j])
def __repr__(self):
return "<%s instance {cols [%d], rows [%d]} at %s>" % (self.__class__, len(self.headers), len(self.results), id(self))
| carvalhomb/tsmells | guess/src/Lib/dbexts.py | Python | gpl-2.0 | 21,345 | ["Brian"] | 41973adf6984adb105971f50b02cd891497835267e01422ae1b16c360627b47e |
import glob, pyraf, time, numpy
from scipy import interpolate
from fitsfitter_MOD import *
from flux_level import *
from flux_level2 import *
from plotFLUX import *
#Retrieve dictionary
lib_path = os.path.abspath('/Users/npastorello/Desktop/Galaxies/General_studies/')
sys.path.append(lib_path)
from galaxyParametersDictionary_v5 import *
###############
### CLASSES ###
###############
class mask():
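    # one DEIMOS mask: header info, pointing, host-galaxy parameters and the
    # list of slit objects built from the spec1d files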
#
def __init__(self):
self.name, self.RA_c, self.Dec_c = '', nan, nan # RA_c and Dec_c are the centre coordinates
        self.maskPA, self.PA = nan, nan  # maskPA is the real PA of the mask on the sky; PA is the value from the header
self.galaxy, self.gal_RA, self.gal_Dec = '', nan, nan
self.gal_PA0, self.gal_Re, self.gal_ba = nan, nan, nan
self.header = {}
self.listSlits = []
#
def assignGalaxy(self, namegal):
self.galaxy = namegal
RA0, Dec0 = CentreCoordinates[namegal]
self.gal_RA, self.gal_Dec = convAngCoord(RA0)[4]*15., convAngCoord(Dec0)[4] #in degrees
self.gal_PA0, self.gal_Re, self.gal_ba = PA0[namegal], Reff[namegal]/3600., b_a[namegal]
#
def assignMaskPars(self, filename): #Takes the fits file name from which extract the header parameters
inTmp = pyfits.open(filename)
self.header = inTmp[0].header
#
self.name = self.header['OBJECT']
self.RA_c, self.Dec_c = convAngCoord(self.header['RA'])[4]*15., convAngCoord(self.header['DEC'])[4] # in degrees
#
self.PA = (90.+self.header['SKYPA1'])
deltaPA = mod(self.PA-self.gal_PA0, 360.) #Difference between mask alignment and galaxy PA0
#
angleNE = mod(90.-self.gal_PA0, 360) #angle between galaxy major axis and East axis
self.maskPA = -mod(angleNE+deltaPA, 360) #angle between the East-West axis and the mask alignment
#
def createSlits(self, listFiles): #Creates a list of objects 'slit' with the proper coordinates and parameters
#
for ii in numpy.arange(len(listFiles)):
#
tmpName = listFiles[ii]
hdu_tmp = pyfits.open(listFiles[ii])
hdr_tmp = hdu_tmp[0].header
#
if (hdr_tmp['RA'] != '0.0') and (hdr_tmp['DEC'] != '0.0'):
#
dummyRA, dummyDec = hdr_tmp['RA'], hdr_tmp['DEC']
#
RAslit = convAngCoord(dummyRA)[4]*15.
Decslit = convAngCoord(dummyDec)[4]
else: # SERENDIPITY OBJECTS DON'T HAVE COORDINATES
#looking for associated object for coordinates
try:
indexSlitAssociated = findPosAssociatedSlit(ii, listFiles)
hdu_tmp2 = pyfits.open(listFiles[indexSlitAssociated])
hdr_tmp2 = hdu_tmp2[0].header
dummyRA, dummyDec = hdr_tmp2['RA'], hdr_tmp2['DEC']
#
RAslit = convAngCoord(dummyRA)[4]*15.
Decslit = convAngCoord(dummyDec)[4]
except:
RAslit, Decslit = nan, nan
#
#
distRA = RAslit-self.RA_c #Distance from mask CentreCoordinates (in degrees)
distDEC = Decslit-self.Dec_c #Distance from mask centre (in degrees)
#
angrot = self.maskPA*numpy.pi/180.
realRA = (distRA*numpy.cos(angrot)-distDEC*numpy.sin(angrot))+self.RA_c #Coordinates given the rotation of the mask
realDEC = (distRA*numpy.sin(angrot)+distDEC*numpy.cos(angrot))+self.Dec_c #Coordinates given the rotation of the mask
#
slitTmp = slit()
slitTmp.assignSlitPars([tmpName, realRA, realDEC, self.maskPA, 'tar'])
self.listSlits.append(slitTmp)
#
def assignExtraParametersSlits(self, filename): #To assign length, centre position in the mask
inputFits = pyfits.open(filename)
slitParLayer = inputFits[11].data
for ii in self.listSlits:
for jj in slitParLayer:
if ii.ID == jj[9]:
ii.slitlength = jj[5] #in arcsec
ii.PArel = self.PA-jj[6]
class slit(mask):
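    # one slit: coordinates, sky-subtracted spectrum and the pPXF kinematic results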
#
def __init__(self):
self.name, self.ID, self.PA, self.Dec, self.RA, self.action = '', '', nan, 0, nan, ''
self.wv, self.flux, self.ivar = numpy.array([]), numpy.array([]), numpy.array([])
self.skyspec, self.totspec, = numpy.array([]), numpy.array([])
self.skyindex, self.slitlength, self.PArel = nan, nan, nan #PArel is the PA relative to the mask
#
self.check, self.heliocentric_corr = False, nan
#
self.vel, self.sigma, self.h3, self.h4, self.chi2 = nan, nan, nan, nan, nan #from pPXF
self.errvel, self.errsigma, self.errh3, self.errh4 = nan, nan, nan, nan #from pPXF
self.ppxf_obj, self.wv_bestfit = None, numpy.array([])
#
def assignSlitPars(self, listValues):
self.name, self.RA, self.Dec, self.PA, self.action = listValues
self.ID = findListNumber(self.name)
#
def assignSpectrum(self, wv, totSpec, skySpec, subSpec, ivarSpec):
self.wv, self.flux, self.ivar = wv, subSpec, ivarSpec
self.skyspec, self.totspec = skySpec, totSpec
#################
### FUNCTIONS ###
#################
def plotSpec(fitsfile, ax=''):
if ax == '':
ax = subplot(111)
tmpIn = pyfits.open(fitsfile)
flux = tmpIn[0].data
wv = numpy.arange(len(flux))*tmpIn[0].header['CDELT1'] + tmpIn[0].header['CRVAL1']
ax.plot(wv, flux)
def findListNumber(stringName):
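    # return the three characters following the first '.' in the file name (the slit number)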
for ii in numpy.arange(len(stringName)):
if stringName[ii] == '.':
return stringName[ii+1:ii+4]
def findPosAssociatedSlit(index,listFILES):
#
# Look for slit number
#
tmp = []
for ii in listFILES:
tmp.append(findListNumber(ii))
#
listSameNumber = numpy.nonzero(numpy.array(tmp) == findListNumber(listFILES[index]))
for ii in listSameNumber[0]:
if not('serendip' in listFILES[ii]):
return ii
def weighted_std(values, weights):
average = numpy.average(values, weights=weights)
variance = numpy.average((values-average)**2, weights=weights) # Fast and numerically precise
return (numpy.sqrt(variance))
# Monte Carlo run for pPXF
def runMC(pp, dv, velScale, start, nReal=100, quiet=True):
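    # refit nReal noisy realisations of the pPXF best-fit spectrum and return the
    # fitted objects; used downstream to estimate errors on the kinematic moments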
import ppxf
#
ppMC = []
#
for ii in numpy.arange(nReal):
#
if verbose:
stdout.write("\r MC %i percent " % (((ii)+1)*100./nReal))
stdout.flush()
#
# Create fake spectrum
numpy.random.seed(int(time.time()*1e6 % 1e5))
#
fakeGal = numpy.random.normal(pp.bestfit, pp.noise)
#
# Run pPXF over fake spectrum and store pPXF object
ppMC.append(ppxf.ppxf(pp.star, fakeGal, pp.noise, velScale, start,
plot=False, moments=pp.moments, degree=pp.degree, vsyst=dv, quiet=quiet,
bias=pp.bias, oversample=pp.oversample)
)
#
if verbose:
stdout.write("\n")
return ppMC
def extractErrorKin(ppxfObjList):
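    # weighted standard deviation of each kinematic moment across the MC fits,
    # weighting each realisation by the inverse of its own pPXF error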
#
errorKin = []
#
for ii in numpy.arange(ppxfObjList[0].moments):
tmpList, tmpListWeights = [], []
for jj in ppxfObjList:
tmpList.append(jj.sol[ii])
tmpListWeights.append(jj.error[ii]) #The weights are the errors on the single kin measurements
#
errorKin.append(weighted_std(tmpList, 1./numpy.array(tmpListWeights)))
#
return numpy.array(errorKin)
##### SKiMS reduction ######
def runSKiMSreduction(Deimos_mask):
### EXTRACTING SPECTRA FROM spec1d FILES
# The output files (should be) already dispersion corrected
# Also the .asc files are created
print "EXTRACTING SPECTRA FROM FITS FILES"
for ii in glob.glob('*.?.*fits'): os.remove(ii)
#
if not(os.path.exists('./login.cl')):
os.system('mkiraf')
#
runFitsfitter(verbose=True)
print "\nDONE"
#
### EXTRACTING COORDINATES FROM fits FILES AND SAVE "DATA.DAT" file
#listFILES=asciidata.open('../R_sky/D_listaSPEC.txt')
#
print "EXTRACTING COORDINATES FROM FITS FILES"
listFILES = glob.glob('*.r.sky.fits')
Deimos_mask.createSlits(listFILES)
#
listNAM, listRA, listDEC, action = [], [], [], []
for ii in Deimos_mask.listSlits:
listNAM.append(ii.name)
listRA.append(ii.RA)
listDEC.append(ii.Dec)
action.append(ii.action)
#
outputTab = numpy.transpose(numpy.array([listNAM, action, listRA, listDEC]))
numpy.savetxt('./DATA.DAT',outputTab,fmt='%s',header='file\taction\tRAdeg\tDecdeg',newline='\n',delimiter='\t')
print "\nDONE"
#
### MEASURING SKY INDEX ON ALL THE SPECTRA
print "MEASURING SKY INDEX IN ALL THE SPECTRA"
fluxLevel(Deimos_mask, inputFile='DATA.DAT', outputFile='DATA2.DAT', setLines='standard')
print "\nDONE"
#
### SELECTION SKY SPECTRA
print "SELECTION SKY SPECTRA"
plotFlux(Deimos_mask, inputFile='DATA2.DAT')
print "\nDONE"
#
### SKY_SUBTRACT
print "CREATING MASTER SKY SPECTRUM"
#
### Cleaning dir
if len(glob.glob('./sky*fits')) > 0:
for ii in glob.glob('./sky*fits'):
os.remove(ii)
#
if len(glob.glob('./sky*asc')) > 0:
for ii in glob.glob('./sky*asc'):
os.remove(ii)
#
filename = './DATA2.DAT'
flag = True
while flag: #Check at least 1 sky-dominated spectrum exists
listFILES = asciidata.open(filename)
listNAMsky, listNAM, action = [], [], []
l1, l2, l3, l4 = [], [], [], []
#
for ii in range(0,len(listFILES[0])):
if (listFILES[1][ii] == 'sky'):
listNAMsky.append("./"+listFILES[0][ii])
listNAM.append(listFILES[0][ii])
l1.append(listFILES[4][ii])
l2.append(listFILES[5][ii])
l3.append(listFILES[6][ii])
l4.append(listFILES[7][ii])
#
if (len(listNAMsky) > 0):
flag = False
else:
raw_input("No SKY spectra selected in '"+filename+"'. Please fix it and press a button.\n")
#
if (os.path.isfile("./temp_sky.lis")): os.system("rm ./temp_sky.lis")
if (os.path.isfile("./temp_gal.lis")): os.system("rm ./temp_gal.lis")
#
numpy.savetxt("./temp_sky.lis",listNAMsky,fmt='%s')
numpy.savetxt("./temp_gal.lis",listNAM,fmt='%s')
#
############### Deleting old files
#
if os.path.exists('./temp.lis'): os.remove('./temp.lis')
#
if os.path.exists('./temp_sky.fits'): os.remove('./temp_sky.fits')
#
if os.path.exists('./norm_sky.fits'): os.remove('./norm_sky.fits')
#
if os.path.exists('./temp_bands.lis'): os.remove('./temp_bands.lis')
#
if os.path.exists('./temp_sky_bands.lis'): os.remove('./temp_sky_bands.lis')
#
############### Make sky estimates
#
fiddle = 1.00
#
# Create sky master spec
pyraf.iraf.onedspec.scombine("@./temp_sky.lis",
"sky.fits", noutput="", logfile="STDOUT", apertures="", group="apertures",
combine="sum", reject="none", first='no', w1='INDEF', w2='INDEF', dw='INDEF',
nw='INDEF', scale="none", zero="none", weight="none", sample="",
lthreshold='INDEF', hthreshold='INDEF', nlow=1, nhigh=1, nkeep=1, mclip='yes',
lsigma=3., hsigma=3., rdnoise="0.", gain="1.", snoise="0.", sigscale=0.1,
pclip=-0.5, grow=0, blank=0.)
#
#
# Convert sky master spec into txt table
pyraf.iraf.onedspec.wspec("./sky.fits","./sky.asc", header='no', wformat="")
#
# plt.ion()
# fig = figure()
# clf()
# ax = subplot(111)
# plotSpec('sky.fits',ax=ax)
#
#####################
#
############ Measure fluxes in sky estimates
#
os.system("ls ./sky.fits > ./temp.lis")
#
run_flux_level2()
#
pyraf.iraf.columns("./temp_levels.lis", 5, outroot="./col.")
os.system('mv ./col.3 ./temp_sky_bands.lis')
pyraf.iraf.delete("./col.*", 'yes', verify='no', default_acti='yes', allversions='yes', subfiles='yes')
#
#####################
#
#
############ Normalise sky estimate
#
flux_level = numpy.loadtxt("./temp_sky_bands.lis")
pyraf.iraf.imarith("./sky.fits", "/", flux_level, "./norm_sky.fits",
title="", divzero=0., hparams="", pixtype="", calctype="", verbose='no',
noact='no')
#
os.remove('./temp.lis')
#
#####################
#
############ Measure fluxes in target frames and remove sky
### 1st mask
#
os.system('cp ./temp_gal.lis ./temp.lis')
# cat ./temp.lis
#
run_flux_level2()
#
pyraf.iraf.columns("./temp_levels.lis", 5, outroot="./col.")
os.system("mv ./col.3 ./temp_bands.lis")
pyraf.iraf.delete("./col.*", 'yes', verify='no', default_acti='yes', allversions='yes', subfiles='yes')
#
listFiles = numpy.loadtxt("./temp_gal.lis",dtype='S50')
flux_level = numpy.loadtxt("./temp_bands.lis",dtype='f50')
#
if len(glob.glob('sub_*.txt'))>0: os.system('rm sub_*.txt')
#
tmpInput = pyfits.open('norm_sky.fits')
fluxSkyMaster = tmpInput[0].data
#
print "DONE\n"
#
#### CUT SKY SPECTRUM TO MATCH WITH THE SCIENCE SPECTRUM
#
print "SUBTRACTING MASTER SKY FROM ALL THE SPECTRA"
#
# INTERPOLATE SKY SPECTRUM
flux_mastersky_norm = tmpInput[0].data
wv_mastersky_norm = numpy.arange(len(tmpInput[0].data))*tmpInput[0].header['CDELT1']+tmpInput[0].header['CRVAL1']
f_mastersky_norm = interpolate.interp1d(wv_mastersky_norm, flux_mastersky_norm)
max_Sky_wv, min_Sky_wv = numpy.max(wv_mastersky_norm), numpy.min(wv_mastersky_norm)
#
# EXTRAPOLATE SKY SPECTRUM ON SAME WAVELENGTH ARRAY AS THE SCIENCE SPECTRUM
for ii in numpy.arange(len(Deimos_mask.listSlits)):
#
stdout.write("\r Subtracting spectrum %i / %i " % ((ii)+1, len(Deimos_mask.listSlits)))
stdout.flush()
#
inputSpecFile = pyfits.open(Deimos_mask.listSlits[ii].name)
flux_spectrum = inputSpecFile[0].data
inputIvarFile = pyfits.open(Deimos_mask.listSlits[ii].name[:-5]+'ivar.fits')
ivar_spectrum = inputIvarFile[0].data
wv_spectrum = numpy.arange(len(inputSpecFile[0].data))*inputSpecFile[0].header['CDELT1']+inputSpecFile[0].header['CRVAL1']
#
selWV = numpy.nonzero((wv_spectrum <= max_Sky_wv) & (wv_spectrum >= min_Sky_wv))
flux_spectrum_cut, wv_spectrum_cut, ivar_spectrum_cut = flux_spectrum[selWV], wv_spectrum[selWV], ivar_spectrum[selWV]
#
flux_sky_norm_extrapolated = f_mastersky_norm(wv_spectrum_cut)
#
# Multiplying sky per flux level
flux_sky_extrapolated = flux_sky_norm_extrapolated*fiddle*flux_level[ii]
# Subtracting sky from spectrum
sub_spectrum = flux_spectrum_cut-flux_sky_extrapolated
#
# save in objects
Deimos_mask.listSlits[ii].assignSpectrum(wv_spectrum_cut, flux_spectrum_cut, flux_sky_extrapolated,
sub_spectrum, ivar_spectrum_cut)
#
outputTab = numpy.transpose(numpy.array([wv_spectrum_cut, sub_spectrum, ivar_spectrum_cut]))
numpy.savetxt('sub_'+listFiles[ii][:-4]+'txt', outputTab, fmt='%.10f', delimiter='\t', newline='\n',header='WV\tFLUX\tIVAR')
#
print "\nDONE"
##### Check Spectra ######
def runCheckSpectra(Deimos_mask, galname='NGC1023'):
#
plt.ion()
#
RA, Dec = [], []
for ii in Deimos_mask.listSlits:
RA.append(ii.RA)
Dec.append(ii.Dec)
#
RA, Dec = numpy.array(RA), numpy.array(Dec)
selNotNan = numpy.nonzero(~(isnan(RA)))
minRA, maxRA = numpy.min(RA[selNotNan]), numpy.max(RA[selNotNan])
minDec, maxDec = numpy.min(Dec[selNotNan]), numpy.max(Dec[selNotNan])
#
fig = figure(0, figsize=(12.6,6))
ax2 = subplot(122)
ax2.scatter(Deimos_mask.gal_RA, Deimos_mask.gal_Dec, marker='x', color='r')
radiuses = numpy.array([1,2,3])
ells = [Ellipse(xy=[Deimos_mask.gal_RA, Deimos_mask.gal_Dec], width=(2.*ii*Deimos_mask.gal_Re/numpy.sqrt(Deimos_mask.gal_ba)),
height=(2.*ii*Deimos_mask.gal_Re*numpy.sqrt(Deimos_mask.gal_ba)), angle=90-Deimos_mask.gal_PA0,
edgecolor = 'k', facecolor = 'none', fill = False, linestyle = 'dashed') for ii in radiuses]
#
for ee in ells: ax2.add_artist(ee)
#
ax2.set_xlim([maxRA, minRA])
ax2.set_ylim([minDec, maxDec])
ax1 = subplot(121)
#
listNAME, listRA, listDEC, listCheck = [], [], [], []
for sel in Deimos_mask.listSlits:
cla()
ax1 = subplot(121)
wv, flux, ivar = sel.wv, sel.flux, sel.ivar
ax1.plot(wv, flux)
ax1.set_xlim([8500,8800])
ax1.set_ylim([numpy.median(flux)/3,numpy.median(flux)*2])
#
answer = raw_input()
if answer == '':
cc = 'b'
listCheck.append(True)
sel.check = True
else:
cc = 'k'
listCheck.append(False)
sel.check = False
ax2.scatter(sel.RA, sel.Dec, color=cc, marker='.')
draw()
#
return True
# To save plot of distro
def drawDistro(Deimos_mask, galname='NGC1023', outputname='outputSlit.pdf'):
#
RA, Dec, check, name = [], [], [], []
for ii in Deimos_mask.listSlits:
RA.append(ii.RA)
Dec.append(ii.Dec)
check.append(ii.check)
name.append(ii.name)
#
fig = figure(num=1, figsize=(6,5))
ax = subplot(111)
#
ax.scatter(Deimos_mask.gal_RA, Deimos_mask.gal_Dec, marker='x', color='r')
radiuses = numpy.array([1,2,3])
ells = [Ellipse(xy=[Deimos_mask.gal_RA, Deimos_mask.gal_Dec], width=(2.*ii*Deimos_mask.gal_Re/numpy.sqrt(Deimos_mask.gal_ba)),
height=(2.*ii*Deimos_mask.gal_Re*numpy.sqrt(Deimos_mask.gal_ba)), angle=90-Deimos_mask.gal_PA0,
edgecolor = 'k', facecolor = 'none', fill = False, linestyle = 'dashed') for ii in radiuses]
#
for ee in ells:
ax.add_artist(ee)
#
ax.set_xlim([40.12999999,40.0399999])
#
ax.scatter(RA, Dec, color='k')
selGood = numpy.nonzero(numpy.array(check))
ax.scatter(numpy.array(RA)[selGood], numpy.array(Dec)[selGood], color='g')
#
ax.scatter(Deimos_mask.RA_c, Deimos_mask.Dec_c, color='b', marker='x')
#
for ii in Deimos_mask.listSlits:
ax.annotate(ii.name[12:-11], xy=(ii.RA, ii.Dec))
#
draw()
savefig(outputname, bbox_inches='tight')
return True
# Finding Heliocentric corrections for the mask
def findHeliocentricCorr(Deimos_mask):
year, month, day = Deimos_mask.header['DATE-OBS'][0:4], Deimos_mask.header['DATE-OBS'][5:7], Deimos_mask.header['DATE-OBS'][8:10]
RAt, Dect, UTCt = Deimos_mask.header['RA'], Deimos_mask.header['Dec'], Deimos_mask.header['UTC']
RAh, Decd, UTh = convAngCoord(RAt)[4], convAngCoord(Dect)[4], convAngCoord(UTCt)[4]
#
pyraf.iraf.rv()
pyraf.iraf.rv.rvcorrect(epoch='2000', observa='keck', vsun='20', ra_vsun='18', dec_vsu='30', epoch_vsu='1900',
year=year, month=month, day=day, ut=UTh, ra=RAh, dec=Decd)
heliocentric_corr = float(raw_input('write heliocentric correction: '))
#
print "\n\n\n"
return heliocentric_corr
def run_pPXF(Deimos_mask, templates='deimos', cutRegion = [8450, 8750]):
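    # for each slit: apply the heliocentric correction, regrid and log-rebin the
    # spectrum, fit V/sigma with pPXF (2 moments), refine with 4 moments, then
    # estimate errors by Monte Carlo; names of failed slits are collected and returned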
#
lib_path = os.path.abspath('/Users/npastorello/Desktop/CaTPros/pPXF_files')
sys.path.append(lib_path)
import ppxf, ppxf_util
#
heliocentric_corr = findHeliocentricCorr(Deimos_mask)
#
if templates == 'deimos': #Standard stars observed with DEIMOS
#
# Saving templates
listTemplates = glob.glob('/Users/npastorello/Desktop/CaTPros/DEIMOS_StellarTemplate/*.fits') #DEIMOS TEMPLATES
elif templates == 'cenarro':
listTemplates = glob.glob('/Volumes/G2/exLustre/REDUCTION/JacobReductionSkims/stellartemplates/cat_spec_fits/scan*.fits')
#
tmp = pyfits.open(listTemplates[0])
lenTemp = len(tmp[0].data)
wvTemp = numpy.arange(lenTemp)*tmp[0].header['CDELT1']+tmp[0].header['CRVAL1']
#selCut = numpy.nonzero((wvTemp < 8750.) & (wvTemp > 8450.))
#wvTemp_cut, fluxTemp_cut = wvTemp[selCut], tmp[0].data[selCut]
wvTemp_cut, fluxTemp_cut = wvTemp, tmp[0].data
#
sspTempl, logLam2, velscale = ppxf_util.log_rebin([wvTemp_cut[0],wvTemp_cut[-1]], fluxTemp_cut)
templatesArray = numpy.empty((len(sspTempl),len(listTemplates)))
#
counter = 0
for ii in listTemplates:
tmp = pyfits.open(ii)
fluxTemp_cut = tmp[0].data#[selCut]
sspNew, logLam2, velscale = ppxf_util.log_rebin([wvTemp_cut[0],wvTemp_cut[-1]], fluxTemp_cut, velscale=velscale)
templatesArray[:,counter] = sspNew
counter += 1
#
if not(os.path.exists('FitSpectra')): os.mkdir('FitSpectra')
#
if not(os.path.exists('Output')): os.mkdir('Output')
#
listFail = []
for ii in Deimos_mask.listSlits:
try:
dicOutput = {}
#
raSlit, decSlit, nameSlit = ii.RA, ii.Dec, ii.name
ii.heliocentric_corr = heliocentric_corr
#
# Reading spectrum
wv, flux, ivar = ii.wv, ii.flux, ii.ivar
#
# Applying heliocentric correction with IRAF
#
wv = wv+(heliocentric_corr*wv/c)
#
#rescaling wv onto same wv scale as templates
newWV = numpy.arange(numpy.min(wv), numpy.max(wv), tmp[0].header['CDELT1'])
functFlux = scipy.interpolate.interp1d(wv, flux)
newFlux = functFlux(newWV)
functIvar = scipy.interpolate.interp1d(wv, ivar)
newIvar = functIvar(newWV)
#
# Cutting spectrum in CaT region
#
selCut = numpy.nonzero((newWV < cutRegion[1]) & (newWV > cutRegion[0]))
wv_cut, flux_cut, ivar_cut = newWV[selCut], newFlux[selCut], newIvar[selCut]
#
            # The variance of the original sky file is underestimated (it doesn't take into account the sky subtraction)
#
lamRange1 = numpy.array([wv_cut[0], wv_cut[-1]])
galaxy, logLam1, velscale = ppxf_util.log_rebin(lamRange1, flux_cut)
#
# Normalizing spectra and noise
#
fac = numpy.median(galaxy)
galaxy /= fac # Normalize spectrum to avoid numerical issues
variance, logLam1, velscale = ppxf_util.log_rebin(lamRange1, 1./ivar_cut)
noise = numpy.sqrt(variance)/fac
#
if templates == 'cenarro':
print 'Not yet implemented.'
return False
#resTempl = 1.5 #22km/s 1.5 AA (FWHM), sigma~22 km/s, R~5700
elif templates == 'deimos': # NOT CONVOLVING BY THE RESOLUTION, BECAUSE THE INSTRUMENT IS THE SAME
#
dummy = True
#
dv = (logLam2[0]-logLam1[0])*c # km/s
vel = vel0[Deimos_mask.galaxy]
goodPixels = ppxf_util.determine_goodpixels(logLam1, [wvTemp[0],wvTemp[-1]], vel)
#
start = [vel, 180.] # (km/s), starting guess for [V,sigma]
#
fig = figure(0, figsize=(6,5))
clf()
pp2 = ppxf.ppxf(templatesArray, galaxy, noise, velscale, start,
goodpixels=goodPixels,
plot=False, moments=2,
vsyst=dv, degree=4, oversample=False, quiet=True)
#
pp4 = ppxf.ppxf(templatesArray, galaxy, noise, velscale, pp2.sol[0:2],
goodpixels=goodPixels,
plot=True, moments=4,
vsyst=dv, degree=4, oversample=False)
#
ii.vel, ii.sigma, ii.h3, ii.h4 = pp4.sol
ii.ppxf_obj = pp4
ii.wv_bestfit = numpy.e**logLam1
#
pp4_MC = runMC(pp4, dv, velscale, pp2.sol[0:2], nReal=100, quiet=True)
ii.errvel, ii.errsigma, ii.errh3, ii.errh4 = extractErrorKin(pp4_MC)
#
suptitle(ii.name)
savefig('FitSpectra/'+ii.name+'.pdf',bbox_inches='tight')
#
#
# inputTab = numpy.loadtxt('DATA2.DAT', dtype={'names': ('filename', 'action', 'RA', 'Dec', 'dummy1', 'dummy2', 'dummy3', 'dummy4'),
# 'formats': ('S50', 'S3', 'f30', 'f30', 'f30', 'f30', 'f30', 'f30')})
# #
# dicOutput['nameSlit'] = nameSlit
# dicOutput['RASlit'], dicOutput['DECSlit'] = raSlit, decSlit
# dicOutput['pPXF_obj'] = pp4
# #
# dicOutput['vel'] = v_slit
# dicOutput['sigma'] = sigma_slit
# dicOutput['h3'] = h3_slit
# dicOutput['h4'] = h4_slit
# #
# dicOutput['evel'] = v_err
# dicOutput['esigma'] = sigma_err
# dicOutput['eh3'] = h3_err
# dicOutput['eh4'] = h4_err
# #
# #
# fileOut = open('Output/'+nameSlit+'.dat', 'wb')
# pickle.dump(dicOutput, fileOut)
# fileOut.close()
except:
listFail.append(ii.name)
return listFail
def run_saveKinPlot(Deimos_mask, outputFile='kinplot.pdf',
xlimits=[300,-300]): #In arcsec
### Create 2d kinematic plot
fig = figure(num=1, figsize=(12.6, 10))
clf()
plt.subplots_adjust(hspace = .000, wspace = .000)
ax_vel = subplot(2,2,1, aspect='equal')
ax_sigma = subplot(2,2,2, aspect='equal')
ax_h3 = subplot(2,2,3, aspect='equal')
ax_h4 = subplot(2,2,4, aspect='equal')
#
RA0, Dec0 = CentreCoordinates[Deimos_mask.galaxy]
RA0deg, Dec0deg = Deimos_mask.gal_RA, Deimos_mask.gal_Dec
pa0, ba0, reff = Deimos_mask.gal_PA0, Deimos_mask.gal_ba, Deimos_mask.gal_Re
#
ax_vel.scatter(0, 0, marker='x', color='r')
ax_sigma.scatter(0, 0, marker='x', color='r')
ax_h3.scatter(0, 0, marker='x', color='r')
ax_h4.scatter(0, 0, marker='x', color='r')
#
RA, Dec, vel, sigma, h3, h4, check = [], [], [], [], [], [], []
for ii in Deimos_mask.listSlits:
RA.append((ii.RA-RA0deg)*3600.) #in arcsec
Dec.append((ii.Dec-Dec0deg)*3600.)
vel.append(ii.vel-vel0[Deimos_mask.galaxy])
sigma.append(ii.sigma)
h3.append(ii.h3)
h4.append(ii.h4)
check.append(ii.check)
#
RA, Dec = numpy.array(RA), numpy.array(Dec)
vel, sigma = numpy.array(vel), numpy.array(sigma)
h3, h4 = numpy.array(h3), numpy.array(h4)
#selgal = numpy.nonzero((RA <250) & (RA > -300) & ~(numpy.isnan(RA)))
selgal = numpy.nonzero(numpy.array(check) == True)
vpoints = ax_vel.scatter(RA[selgal], Dec[selgal], c=vel[selgal])
posAx = ax_vel.get_position()
posAx.y0, posAx.y1 = posAx.y1, posAx.y0+0.02
axCB = fig.add_axes(posAx)
CB = colorbar(vpoints, orientation='horizontal', cax=axCB)
CB.ax.xaxis.set_ticks_position('top')
#
spoints = ax_sigma.scatter(RA[selgal], Dec[selgal], c=numpy.log10(sigma[selgal]))
posAx = ax_sigma.get_position()
posAx.y0, posAx.y1 = posAx.y1, posAx.y0+0.02
axCB = fig.add_axes(posAx)
CB = colorbar(spoints, orientation='horizontal', cax=axCB)
CB.ax.xaxis.set_ticks_position('top')
#
h3points = ax_h3.scatter(RA[selgal], Dec[selgal], c=h3[selgal])
posAx = ax_h3.get_position()
posAx.y1, posAx.y0 = posAx.y0 - 0.04, posAx.y0 - 0.06
axCB = fig.add_axes(posAx)
CB = colorbar(h3points, orientation='horizontal', cax=axCB)
CB.ax.xaxis.set_ticks_position('bottom')
#
h4points = ax_h4.scatter(RA[selgal], Dec[selgal], c=h4[selgal])
posAx = ax_h4.get_position()
posAx.y1, posAx.y0 = posAx.y0 - 0.04, posAx.y0 - 0.06
axCB = fig.add_axes(posAx)
CB = colorbar(h4points, orientation='horizontal', cax=axCB)
CB.ax.xaxis.set_ticks_position('bottom')
#
ax_vel.xaxis.set_ticklabels([])
ax_vel.set_xlim(xlimits)
ax_vel.annotate('Velocity', xy=(-200,40))
ax_sigma.yaxis.set_ticklabels([])
#tmpax = ax_sigma.twinx()
#ax_sigma.xaxis.set_ticklabels([])
#tmpax.set_ylim(ax_vel.set_ylim())
ax_sigma.set_xlim(xlimits)
ax_sigma.annotate('Velocity dispersion', xy=(-100,40))
#
ax_h3.set_xlim(xlimits)
ax_h3.annotate('h3', xy=(-200,40))
#
ax_h4.yaxis.set_ticklabels([])
#tmpax = ax_h4.twinx()
#tmpax.set_ylim(ax_h3.set_ylim())
ax_h4.set_xlim(xlimits)
ax_h4.annotate('h4', xy=(-200,40))
#
radiuses = numpy.array([1,2,3])
ells = [Ellipse(xy=[0, 0], width=(2.*ii*Deimos_mask.gal_Re/numpy.sqrt(Deimos_mask.gal_ba)),
height=(2.*ii*Deimos_mask.gal_Re*numpy.sqrt(Deimos_mask.gal_ba)), angle=90-Deimos_mask.gal_PA0,
edgecolor = 'k', facecolor = 'none', fill = False, linestyle = 'dashed') for ii in radiuses]
for ee in ells:
ax_vel.add_artist(ee)
#
savefig(outputFile)
return True
| pastorenick/SuperSKiMS_reduction | SKiMS_red__def__.py | Python | gpl-2.0 | 27,175 | ["Galaxy"] | c0ce686b879e10c9b317cef6f55a25a35f9323494c280e01ec40f8d8520201ea |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for consumer_tracking_pipeline_visitor."""
# pytype: skip-file
from __future__ import absolute_import
import logging
import unittest
import apache_beam as beam
from apache_beam import pvalue
from apache_beam.pipeline import Pipeline
from apache_beam.pvalue import AsList
from apache_beam.runners.direct import DirectRunner
from apache_beam.runners.direct.consumer_tracking_pipeline_visitor import ConsumerTrackingPipelineVisitor
from apache_beam.transforms import CoGroupByKey
from apache_beam.transforms import Create
from apache_beam.transforms import DoFn
from apache_beam.transforms import FlatMap
from apache_beam.transforms import Flatten
from apache_beam.transforms import ParDo
# Disable frequent lint warning due to pipe operator for chaining transforms.
# pylint: disable=expression-not-assigned
# pylint: disable=pointless-statement
class ConsumerTrackingPipelineVisitorTest(unittest.TestCase):
def setUp(self):
self.pipeline = Pipeline(DirectRunner())
self.visitor = ConsumerTrackingPipelineVisitor()
try: # Python 2
self.assertCountEqual = self.assertItemsEqual
except AttributeError: # Python 3
pass
def test_root_transforms(self):
root_read = beam.Impulse()
root_flatten = Flatten(pipeline=self.pipeline)
pbegin = pvalue.PBegin(self.pipeline)
pcoll_read = pbegin | 'read' >> root_read
pcoll_read | FlatMap(lambda x: x)
[] | 'flatten' >> root_flatten
self.pipeline.visit(self.visitor)
root_transforms = [t.transform for t in self.visitor.root_transforms]
self.assertCountEqual(root_transforms, [root_read, root_flatten])
pbegin_consumers = [
c.transform for c in self.visitor.value_to_consumers[pbegin]
]
self.assertCountEqual(pbegin_consumers, [root_read])
self.assertEqual(len(self.visitor.step_names), 3)
def test_side_inputs(self):
class SplitNumbersFn(DoFn):
def process(self, element):
if element < 0:
yield pvalue.TaggedOutput('tag_negative', element)
else:
yield element
class ProcessNumbersFn(DoFn):
def process(self, element, negatives):
yield element
def _process_numbers(pcoll, negatives):
first_output = (
pcoll
| 'process numbers step 1' >> ParDo(ProcessNumbersFn(), negatives))
second_output = (
first_output
| 'process numbers step 2' >> ParDo(ProcessNumbersFn(), negatives))
output_pc = ((first_output, second_output)
| 'flatten results' >> beam.Flatten())
return output_pc
root_read = beam.Impulse()
result = (
self.pipeline
| 'read' >> root_read
| ParDo(SplitNumbersFn()).with_outputs('tag_negative', main='positive'))
positive, negative = result
_process_numbers(positive, AsList(negative))
self.pipeline.visit(self.visitor)
root_transforms = [t.transform for t in self.visitor.root_transforms]
self.assertEqual(root_transforms, [root_read])
self.assertEqual(len(self.visitor.step_names), 5)
self.assertEqual(len(self.visitor.views), 1)
self.assertTrue(isinstance(self.visitor.views[0], pvalue.AsList))
def test_co_group_by_key(self):
emails = self.pipeline | 'email' >> Create([('joe', 'joe@example.com')])
phones = self.pipeline | 'phone' >> Create([('mary', '111-222-3333')])
{'emails': emails, 'phones': phones} | CoGroupByKey()
self.pipeline.visit(self.visitor)
root_transforms = [t.transform for t in self.visitor.root_transforms]
self.assertEqual(len(root_transforms), 2)
self.assertGreater(
len(self.visitor.step_names), 3) # 2 creates + expanded CoGBK
self.assertEqual(len(self.visitor.views), 0)
def test_visitor_not_sorted(self):
p = Pipeline()
# pylint: disable=expression-not-assigned
from apache_beam.testing.test_stream import TestStream
p | TestStream().add_elements(['']) | beam.Map(lambda _: _)
original_graph = p.to_runner_api(return_context=False)
out_of_order_graph = p.to_runner_api(return_context=False)
root_id = out_of_order_graph.root_transform_ids[0]
root = out_of_order_graph.components.transforms[root_id]
tmp = root.subtransforms[0]
root.subtransforms[0] = root.subtransforms[1]
root.subtransforms[1] = tmp
p = beam.Pipeline().from_runner_api(
out_of_order_graph, runner='BundleBasedDirectRunner', options=None)
v_out_of_order = ConsumerTrackingPipelineVisitor()
p.visit(v_out_of_order)
p = beam.Pipeline().from_runner_api(
original_graph, runner='BundleBasedDirectRunner', options=None)
v_original = ConsumerTrackingPipelineVisitor()
p.visit(v_original)
# Convert to string to assert they are equal.
out_of_order_labels = {
str(k): [str(t) for t in v_out_of_order.value_to_consumers[k]]
for k in v_out_of_order.value_to_consumers
}
original_labels = {
str(k): [str(t) for t in v_original.value_to_consumers[k]]
for k in v_original.value_to_consumers
}
self.assertDictEqual(out_of_order_labels, original_labels)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main()
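# Illustrative invocation (assuming the Beam Python SDK is importable from the checkout):
#   python -m unittest apache_beam.runners.direct.consumer_tracking_pipeline_visitor_test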
|
iemejia/incubator-beam
|
sdks/python/apache_beam/runners/direct/consumer_tracking_pipeline_visitor_test.py
|
Python
|
apache-2.0
| 5,990
|
[
"VisIt"
] |
f41cfc5556e7a8b4dac848309669c01e90610373cd76d3d97868f517d9f01b65
|
# coding: utf-8
"""
Collection of low-level tools that facilitate the interface with resource managers.
The preferred way of importing this module is:
import qutils as qu
"""
from monty.string import is_string
from pymatgen.core.units import Time, Memory
import logging
logger = logging.getLogger(__name__)
def slurm_parse_timestr(s):
"""
A slurm time parser. Accepts a string in one of the following forms:
# "days-hours",
# "days-hours:minutes",
# "days-hours:minutes:seconds".
# "minutes",
# "minutes:seconds",
# "hours:minutes:seconds",
Returns:
Time in seconds.
Raises:
`ValueError` if string is not valid.
"""
days, hours, minutes, seconds = 0, 0, 0, 0
if isinstance(s, int): # already a plain number of seconds
return Time(s, "s")
if '-' in s:
# "days-hours",
# "days-hours:minutes",
# "days-hours:minutes:seconds".
days, s = s.split("-")
days = int(days)
if ':' not in s:
hours = int(float(s))
elif s.count(':') == 1:
hours, minutes = map(int, s.split(':'))
elif s.count(':') == 2:
hours, minutes, seconds = map(int, s.split(':'))
else:
raise ValueError("More that 2 ':' in string!")
else:
# "minutes",
# "minutes:seconds",
# "hours:minutes:seconds",
if ':' not in s:
minutes = int(float(s))
elif s.count(':') == 1:
minutes, seconds = map(int, s.split(':'))
elif s.count(':') == 2:
hours, minutes, seconds = map(int, s.split(':'))
else:
raise ValueError("More than 2 ':' in string!")
return Time((days*24 + hours)*3600 + minutes*60 + seconds, "s")
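# Worked examples for slurm_parse_timestr (illustrative only; they follow directly
# from the branches above, with the result expressed in seconds):
#   slurm_parse_timestr("2-12:30:15") -> Time((2*24 + 12)*3600 + 30*60 + 15, "s")  # 217815 s
#   slurm_parse_timestr("90")         -> Time(90*60, "s")   # a bare number is read as minutes
#   slurm_parse_timestr("1:30:00")    -> Time(1*3600 + 30*60, "s")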
def time2slurm(timeval, unit="s"):
"""
Convert a number representing a time value in the given unit (Default: seconds)
to a string following the slurm convention: "days-hours:minutes:seconds".
>>> assert time2slurm(61) == '0-0:1:1' and time2slurm(60*60+1) == '0-1:0:1'
>>> assert time2slurm(0.5, unit="h") == '0-0:30:0'
"""
d, h, m, s = 24*3600, 3600, 60, 1
timeval = Time(timeval, unit).to("s")
days, hours = divmod(timeval, d)
hours, minutes = divmod(hours, h)
minutes, secs = divmod(minutes, m)
return "%d-%d:%d:%d" % (days, hours, minutes, secs)
def time2pbspro(timeval, unit="s"):
"""
Convert a number representing a time value in the given unit (Default: seconds)
to a string following the PbsPro convention: "hours:minutes:seconds".
>>> assert time2pbspro(2, unit="d") == '48:0:0'
"""
h, m, s = 3600, 60, 1
timeval = Time(timeval, unit).to("s")
hours, minutes = divmod(timeval, h)
minutes, secs = divmod(minutes, m)
return "%d:%d:%d" % (hours, minutes, secs)
def time2loadlever(timeval, unit="s"):
"""
Convert a number representing a time value in the given unit (Default: seconds)
to a string following the LoadLeveler convention: hh:mm:ss (hours:minutes:seconds).
>>> assert time2loadlever(2, unit="d") == '48:00:00'
"""
h, m, s = 3600, 60, 1
timeval = Time(timeval, unit).to("s")
hours, minutes = divmod(timeval, h)
minutes, secs = divmod(minutes, m)
return "%d:%02d:%02d" % (hours, minutes, secs)
def timelimit_parser(s):
"""Convert a float or a string into time in seconds."""
try:
return Time(float(s), "s")
except ValueError:
return slurm_parse_timestr(s)
def any2mb(s):
"""Convert string or number to memory in megabytes."""
if is_string(s):
return int(Memory.from_string(s).to("Mb"))
else:
return int(s)
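# Illustrative conversions (assuming pymatgen's Memory parses strings such as "2 Gb",
# which any2mb above relies on; numeric input is already taken to be in megabytes):
#   any2mb(1024)    -> 1024
#   any2mb("2 Gb")  -> 2048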
|
dongsenfo/pymatgen
|
pymatgen/io/abinit/qutils.py
|
Python
|
mit
| 3,697
|
[
"pymatgen"
] |
867df861b5aac0704f6537483dfe2d944f78641d71dacff0d9076fb97853609e
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import msrest.serialization
class ARMBaseModel(msrest.serialization.Model):
"""Represents the base class for all object models.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ARMBaseModel, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class Addon(ARMBaseModel):
"""Role Addon.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ArcAddon, IoTAddon.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:param kind: Required. Addon type. Constant filled by server. Possible values include:
"IotEdge", "ArcForKubernetes".
:type kind: str or ~azure.mgmt.databoxedge.v2020_09_01.models.AddonType
:ivar system_data: Metadata pertaining to creation and last modification of Addon.
:vartype system_data: ~azure.mgmt.databoxedge.v2020_09_01.models.SystemData
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'kind': {'required': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
_subtype_map = {
'kind': {'ArcForKubernetes': 'ArcAddon', 'IotEdge': 'IoTAddon'}
}
def __init__(
self,
**kwargs
):
super(Addon, self).__init__(**kwargs)
self.kind = 'Addon' # type: str
self.system_data = None
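# Editorial note (not generated code): the _subtype_map above is what lets the msrest
# deserializer pick a concrete subclass from the discriminator on the wire, e.g. a
# payload with "kind": "ArcForKubernetes" is deserialized as an ArcAddon instance and
# one with "kind": "IotEdge" as an IoTAddon instance.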
class AddonList(msrest.serialization.Model):
"""Collection of all the Role addon on the Azure Stack Edge device.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The Value.
:vartype value: list[~azure.mgmt.databoxedge.v2020_09_01.models.Addon]
:ivar next_link: Link to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Addon]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AddonList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class Address(msrest.serialization.Model):
"""The shipping address of the customer.
All required parameters must be populated in order to send to Azure.
:param address_line1: The address line1.
:type address_line1: str
:param address_line2: The address line2.
:type address_line2: str
:param address_line3: The address line3.
:type address_line3: str
:param postal_code: The postal code.
:type postal_code: str
:param city: The city name.
:type city: str
:param state: The state name.
:type state: str
:param country: Required. The country name.
:type country: str
"""
_validation = {
'country': {'required': True},
}
_attribute_map = {
'address_line1': {'key': 'addressLine1', 'type': 'str'},
'address_line2': {'key': 'addressLine2', 'type': 'str'},
'address_line3': {'key': 'addressLine3', 'type': 'str'},
'postal_code': {'key': 'postalCode', 'type': 'str'},
'city': {'key': 'city', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'country': {'key': 'country', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Address, self).__init__(**kwargs)
self.address_line1 = kwargs.get('address_line1', None)
self.address_line2 = kwargs.get('address_line2', None)
self.address_line3 = kwargs.get('address_line3', None)
self.postal_code = kwargs.get('postal_code', None)
self.city = kwargs.get('city', None)
self.state = kwargs.get('state', None)
self.country = kwargs['country']
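# Illustrative usage (hypothetical values; per _validation above, country is the only
# required field):
#   shipping = Address(country="United States", city="Redmond", postal_code="98052")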
class Alert(ARMBaseModel):
"""Alert on the data box edge/gateway device.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:ivar system_data: Alert generated in the resource.
:vartype system_data: ~azure.mgmt.databoxedge.v2020_09_01.models.SystemData
:ivar title: Alert title.
:vartype title: str
:ivar alert_type: Alert type.
:vartype alert_type: str
:ivar appeared_at_date_time: UTC time when the alert appeared.
:vartype appeared_at_date_time: ~datetime.datetime
:ivar recommendation: Alert recommendation.
:vartype recommendation: str
:ivar severity: Severity of the alert. Possible values include: "Informational", "Warning",
"Critical".
:vartype severity: str or ~azure.mgmt.databoxedge.v2020_09_01.models.AlertSeverity
:ivar error_details: Error details of the alert.
:vartype error_details: ~azure.mgmt.databoxedge.v2020_09_01.models.AlertErrorDetails
:ivar detailed_information: Alert details.
:vartype detailed_information: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'title': {'readonly': True},
'alert_type': {'readonly': True},
'appeared_at_date_time': {'readonly': True},
'recommendation': {'readonly': True},
'severity': {'readonly': True},
'error_details': {'readonly': True},
'detailed_information': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'title': {'key': 'properties.title', 'type': 'str'},
'alert_type': {'key': 'properties.alertType', 'type': 'str'},
'appeared_at_date_time': {'key': 'properties.appearedAtDateTime', 'type': 'iso-8601'},
'recommendation': {'key': 'properties.recommendation', 'type': 'str'},
'severity': {'key': 'properties.severity', 'type': 'str'},
'error_details': {'key': 'properties.errorDetails', 'type': 'AlertErrorDetails'},
'detailed_information': {'key': 'properties.detailedInformation', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(Alert, self).__init__(**kwargs)
self.system_data = None
self.title = None
self.alert_type = None
self.appeared_at_date_time = None
self.recommendation = None
self.severity = None
self.error_details = None
self.detailed_information = None
class AlertErrorDetails(msrest.serialization.Model):
"""Error details for the alert.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar error_code: Error code.
:vartype error_code: str
:ivar error_message: Error Message.
:vartype error_message: str
:ivar occurrences: Number of occurrences.
:vartype occurrences: int
"""
_validation = {
'error_code': {'readonly': True},
'error_message': {'readonly': True},
'occurrences': {'readonly': True},
}
_attribute_map = {
'error_code': {'key': 'errorCode', 'type': 'str'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'occurrences': {'key': 'occurrences', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(AlertErrorDetails, self).__init__(**kwargs)
self.error_code = None
self.error_message = None
self.occurrences = None
class AlertList(msrest.serialization.Model):
"""Collection of alerts.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The value.
:vartype value: list[~azure.mgmt.databoxedge.v2020_09_01.models.Alert]
:ivar next_link: Link to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Alert]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AlertList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class ArcAddon(Addon):
"""Arc Addon.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:param kind: Required. Addon type. Constant filled by server. Possible values include:
"IotEdge", "ArcForKubernetes".
:type kind: str or ~azure.mgmt.databoxedge.v2020_09_01.models.AddonType
:ivar system_data: Metadata pertaining to creation and last modification of Addon.
:vartype system_data: ~azure.mgmt.databoxedge.v2020_09_01.models.SystemData
:param subscription_id: Required. Arc resource subscription Id.
:type subscription_id: str
:param resource_group_name: Required. Arc resource group name.
:type resource_group_name: str
:param resource_name: Required. Arc resource Name.
:type resource_name: str
:param resource_location: Required. Arc resource location.
:type resource_location: str
:ivar version: Arc resource version.
:vartype version: str
:ivar host_platform: Host OS supported by the Arc addon. Possible values include: "Windows",
"Linux".
:vartype host_platform: str or ~azure.mgmt.databoxedge.v2020_09_01.models.PlatformType
:ivar host_platform_type: Platform where the runtime is hosted. Possible values include:
"KubernetesCluster", "LinuxVM".
:vartype host_platform_type: str or ~azure.mgmt.databoxedge.v2020_09_01.models.HostPlatformType
:ivar provisioning_state: Addon Provisioning State. Possible values include: "Invalid",
"Creating", "Created", "Updating", "Reconfiguring", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.databoxedge.v2020_09_01.models.AddonState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'kind': {'required': True},
'system_data': {'readonly': True},
'subscription_id': {'required': True},
'resource_group_name': {'required': True},
'resource_name': {'required': True},
'resource_location': {'required': True},
'version': {'readonly': True},
'host_platform': {'readonly': True},
'host_platform_type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'},
'resource_group_name': {'key': 'properties.resourceGroupName', 'type': 'str'},
'resource_name': {'key': 'properties.resourceName', 'type': 'str'},
'resource_location': {'key': 'properties.resourceLocation', 'type': 'str'},
'version': {'key': 'properties.version', 'type': 'str'},
'host_platform': {'key': 'properties.hostPlatform', 'type': 'str'},
'host_platform_type': {'key': 'properties.hostPlatformType', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ArcAddon, self).__init__(**kwargs)
self.kind = 'ArcForKubernetes' # type: str
self.subscription_id = kwargs['subscription_id']
self.resource_group_name = kwargs['resource_group_name']
self.resource_name = kwargs['resource_name']
self.resource_location = kwargs['resource_location']
self.version = None
self.host_platform = None
self.host_platform_type = None
self.provisioning_state = None
class AsymmetricEncryptedSecret(msrest.serialization.Model):
"""Represent the secrets intended for encryption with asymmetric key pair.
All required parameters must be populated in order to send to Azure.
:param value: Required. The value of the secret.
:type value: str
:param encryption_cert_thumbprint: Thumbprint certificate used to encrypt \"Value\". If the
value is unencrypted, it will be null.
:type encryption_cert_thumbprint: str
:param encryption_algorithm: Required. The algorithm used to encrypt "Value". Possible values
include: "None", "AES256", "RSAES_PKCS1_v_1_5".
:type encryption_algorithm: str or
~azure.mgmt.databoxedge.v2020_09_01.models.EncryptionAlgorithm
"""
_validation = {
'value': {'required': True},
'encryption_algorithm': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'encryption_cert_thumbprint': {'key': 'encryptionCertThumbprint', 'type': 'str'},
'encryption_algorithm': {'key': 'encryptionAlgorithm', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AsymmetricEncryptedSecret, self).__init__(**kwargs)
self.value = kwargs['value']
self.encryption_cert_thumbprint = kwargs.get('encryption_cert_thumbprint', None)
self.encryption_algorithm = kwargs['encryption_algorithm']
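# Illustrative usage (hypothetical values; value and encryption_algorithm are required
# per _validation above):
#   secret = AsymmetricEncryptedSecret(value="<encrypted value>", encryption_algorithm="AES256")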
class Authentication(msrest.serialization.Model):
"""Authentication mechanism for IoT devices.
:param symmetric_key: Symmetric key for authentication.
:type symmetric_key: ~azure.mgmt.databoxedge.v2020_09_01.models.SymmetricKey
"""
_attribute_map = {
'symmetric_key': {'key': 'symmetricKey', 'type': 'SymmetricKey'},
}
def __init__(
self,
**kwargs
):
super(Authentication, self).__init__(**kwargs)
self.symmetric_key = kwargs.get('symmetric_key', None)
class AzureContainerInfo(msrest.serialization.Model):
"""Azure container mapping of the endpoint.
All required parameters must be populated in order to send to Azure.
:param storage_account_credential_id: Required. ID of the storage account credential used to
access storage.
:type storage_account_credential_id: str
:param container_name: Required. Container name (Based on the data format specified, this
represents the name of Azure Files/Page blob/Block blob).
:type container_name: str
:param data_format: Required. Storage format used for the file represented by the share.
Possible values include: "BlockBlob", "PageBlob", "AzureFile".
:type data_format: str or ~azure.mgmt.databoxedge.v2020_09_01.models.AzureContainerDataFormat
"""
_validation = {
'storage_account_credential_id': {'required': True},
'container_name': {'required': True},
'data_format': {'required': True},
}
_attribute_map = {
'storage_account_credential_id': {'key': 'storageAccountCredentialId', 'type': 'str'},
'container_name': {'key': 'containerName', 'type': 'str'},
'data_format': {'key': 'dataFormat', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzureContainerInfo, self).__init__(**kwargs)
self.storage_account_credential_id = kwargs['storage_account_credential_id']
self.container_name = kwargs['container_name']
self.data_format = kwargs['data_format']
class BandwidthSchedule(ARMBaseModel):
"""The bandwidth schedule details.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:ivar system_data: Bandwidth object related to ASE resource.
:vartype system_data: ~azure.mgmt.databoxedge.v2020_09_01.models.SystemData
:param start: Required. The start time of the schedule in UTC.
:type start: str
:param stop: Required. The stop time of the schedule in UTC.
:type stop: str
:param rate_in_mbps: Required. The bandwidth rate in Mbps.
:type rate_in_mbps: int
:param days: Required. The days of the week when this schedule is applicable.
:type days: list[str or ~azure.mgmt.databoxedge.v2020_09_01.models.DayOfWeek]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'start': {'required': True},
'stop': {'required': True},
'rate_in_mbps': {'required': True},
'days': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'start': {'key': 'properties.start', 'type': 'str'},
'stop': {'key': 'properties.stop', 'type': 'str'},
'rate_in_mbps': {'key': 'properties.rateInMbps', 'type': 'int'},
'days': {'key': 'properties.days', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(BandwidthSchedule, self).__init__(**kwargs)
self.system_data = None
self.start = kwargs['start']
self.stop = kwargs['stop']
self.rate_in_mbps = kwargs['rate_in_mbps']
self.days = kwargs['days']
class BandwidthSchedulesList(msrest.serialization.Model):
"""The collection of bandwidth schedules.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of bandwidth schedules.
:vartype value: list[~azure.mgmt.databoxedge.v2020_09_01.models.BandwidthSchedule]
:ivar next_link: Link to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[BandwidthSchedule]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(BandwidthSchedulesList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class ClientAccessRight(msrest.serialization.Model):
"""The mapping between a particular client IP and the type of access client has on the NFS share.
All required parameters must be populated in order to send to Azure.
:param client: Required. IP of the client.
:type client: str
:param access_permission: Required. Type of access to be allowed for the client. Possible
values include: "NoAccess", "ReadOnly", "ReadWrite".
:type access_permission: str or ~azure.mgmt.databoxedge.v2020_09_01.models.ClientPermissionType
"""
_validation = {
'client': {'required': True},
'access_permission': {'required': True},
}
_attribute_map = {
'client': {'key': 'client', 'type': 'str'},
'access_permission': {'key': 'accessPermission', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ClientAccessRight, self).__init__(**kwargs)
self.client = kwargs['client']
self.access_permission = kwargs['access_permission']
class Role(ARMBaseModel):
"""Compute role.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: CloudEdgeManagementRole, IoTRole, KubernetesRole, MECRole.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:param kind: Required. Role type. Constant filled by server. Possible values include: "IOT",
"ASA", "Functions", "Cognitive", "MEC", "CloudEdgeManagement", "Kubernetes".
:type kind: str or ~azure.mgmt.databoxedge.v2020_09_01.models.RoleTypes
:ivar system_data: Role configured on ASE resource.
:vartype system_data: ~azure.mgmt.databoxedge.v2020_09_01.models.SystemData
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'kind': {'required': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
_subtype_map = {
'kind': {'CloudEdgeManagement': 'CloudEdgeManagementRole', 'IOT': 'IoTRole', 'Kubernetes': 'KubernetesRole', 'MEC': 'MECRole'}
}
def __init__(
self,
**kwargs
):
super(Role, self).__init__(**kwargs)
self.kind = 'Role' # type: str
self.system_data = None
class CloudEdgeManagementRole(Role):
"""CloudEdgeManagementRole role.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:param kind: Required. Role type. Constant filled by server. Possible values include: "IOT",
"ASA", "Functions", "Cognitive", "MEC", "CloudEdgeManagement", "Kubernetes".
:type kind: str or ~azure.mgmt.databoxedge.v2020_09_01.models.RoleTypes
:ivar system_data: Role configured on ASE resource.
:vartype system_data: ~azure.mgmt.databoxedge.v2020_09_01.models.SystemData
:ivar local_management_status: Local Edge Management Status. Possible values include:
"Enabled", "Disabled".
:vartype local_management_status: str or ~azure.mgmt.databoxedge.v2020_09_01.models.RoleStatus
:ivar edge_profile: Edge Profile of the resource.
:vartype edge_profile: ~azure.mgmt.databoxedge.v2020_09_01.models.EdgeProfile
:param role_status: Role status. Possible values include: "Enabled", "Disabled".
:type role_status: str or ~azure.mgmt.databoxedge.v2020_09_01.models.RoleStatus
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'kind': {'required': True},
'system_data': {'readonly': True},
'local_management_status': {'readonly': True},
'edge_profile': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'local_management_status': {'key': 'properties.localManagementStatus', 'type': 'str'},
'edge_profile': {'key': 'properties.edgeProfile', 'type': 'EdgeProfile'},
'role_status': {'key': 'properties.roleStatus', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CloudEdgeManagementRole, self).__init__(**kwargs)
self.kind = 'CloudEdgeManagement' # type: str
self.local_management_status = None
self.edge_profile = None
self.role_status = kwargs.get('role_status', None)
class CloudErrorBody(msrest.serialization.Model):
"""An error response from the service.
:param code: An identifier for the error. Codes are invariant and are intended to be consumed
programmatically.
:type code: str
:param message: A message describing the error, intended to be suitable for display in a user
interface.
:type message: str
:param details: A list of additional details about the error.
:type details: list[~azure.mgmt.databoxedge.v2020_09_01.models.CloudErrorBody]
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'details': {'key': 'details', 'type': '[CloudErrorBody]'},
}
def __init__(
self,
**kwargs
):
super(CloudErrorBody, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
self.details = kwargs.get('details', None)
class CniConfig(msrest.serialization.Model):
"""Cni configuration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: Cni type.
:vartype type: str
:ivar version: Cni version.
:vartype version: str
:ivar pod_subnet: Pod Subnet.
:vartype pod_subnet: str
:ivar service_subnet: Service subnet.
:vartype service_subnet: str
"""
_validation = {
'type': {'readonly': True},
'version': {'readonly': True},
'pod_subnet': {'readonly': True},
'service_subnet': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'pod_subnet': {'key': 'podSubnet', 'type': 'str'},
'service_subnet': {'key': 'serviceSubnet', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CniConfig, self).__init__(**kwargs)
self.type = None
self.version = None
self.pod_subnet = None
self.service_subnet = None
class ComputeResource(msrest.serialization.Model):
"""Compute infrastructure Resource.
All required parameters must be populated in order to send to Azure.
:param processor_count: Required. Processor count.
:type processor_count: int
:param memory_in_gb: Required. Memory in GB.
:type memory_in_gb: long
"""
_validation = {
'processor_count': {'required': True},
'memory_in_gb': {'required': True},
}
_attribute_map = {
'processor_count': {'key': 'processorCount', 'type': 'int'},
'memory_in_gb': {'key': 'memoryInGB', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
super(ComputeResource, self).__init__(**kwargs)
self.processor_count = kwargs['processor_count']
self.memory_in_gb = kwargs['memory_in_gb']
class ContactDetails(msrest.serialization.Model):
"""Contains all the contact details of the customer.
All required parameters must be populated in order to send to Azure.
:param contact_person: Required. The contact person name.
:type contact_person: str
:param company_name: Required. The name of the company.
:type company_name: str
:param phone: Required. The phone number.
:type phone: str
:param email_list: Required. The email list.
:type email_list: list[str]
"""
_validation = {
'contact_person': {'required': True},
'company_name': {'required': True},
'phone': {'required': True},
'email_list': {'required': True},
}
_attribute_map = {
'contact_person': {'key': 'contactPerson', 'type': 'str'},
'company_name': {'key': 'companyName', 'type': 'str'},
'phone': {'key': 'phone', 'type': 'str'},
'email_list': {'key': 'emailList', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(ContactDetails, self).__init__(**kwargs)
self.contact_person = kwargs['contact_person']
self.company_name = kwargs['company_name']
self.phone = kwargs['phone']
self.email_list = kwargs['email_list']
class Container(ARMBaseModel):
"""Represents a container on the Data Box Edge/Gateway device.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:ivar system_data: Container in DataBoxEdge Resource.
:vartype system_data: ~azure.mgmt.databoxedge.v2020_09_01.models.SystemData
:ivar container_status: Current status of the container. Possible values include: "OK",
"Offline", "Unknown", "Updating", "NeedsAttention".
:vartype container_status: str or ~azure.mgmt.databoxedge.v2020_09_01.models.ContainerStatus
:param data_format: Required. DataFormat for Container. Possible values include: "BlockBlob",
"PageBlob", "AzureFile".
:type data_format: str or ~azure.mgmt.databoxedge.v2020_09_01.models.AzureContainerDataFormat
:ivar refresh_details: Details of the refresh job on this container.
:vartype refresh_details: ~azure.mgmt.databoxedge.v2020_09_01.models.RefreshDetails
:ivar created_date_time: The UTC time when container got created.
:vartype created_date_time: ~datetime.datetime
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'container_status': {'readonly': True},
'data_format': {'required': True},
'refresh_details': {'readonly': True},
'created_date_time': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'container_status': {'key': 'properties.containerStatus', 'type': 'str'},
'data_format': {'key': 'properties.dataFormat', 'type': 'str'},
'refresh_details': {'key': 'properties.refreshDetails', 'type': 'RefreshDetails'},
'created_date_time': {'key': 'properties.createdDateTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(Container, self).__init__(**kwargs)
self.system_data = None
self.container_status = None
self.data_format = kwargs['data_format']
self.refresh_details = None
self.created_date_time = None
class ContainerList(msrest.serialization.Model):
"""Collection of all the containers on the Data Box Edge/Gateway device.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of containers.
:vartype value: list[~azure.mgmt.databoxedge.v2020_09_01.models.Container]
:ivar next_link: Link to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Container]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContainerList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class DataBoxEdgeDevice(ARMBaseModel):
"""The Data Box Edge/Gateway device.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:param location: Required. The location of the device. This is a supported and registered Azure
geographical region (for example, West US, East US, or Southeast Asia). The geographical region
of a device cannot be changed once it is created, but if an identical geographical region is
specified on update, the request will succeed.
:type location: str
:param tags: A set of tags. The list of tags that describe the device. These tags can be used
to view and group this device (across resource groups).
:type tags: dict[str, str]
:param sku: The SKU type.
:type sku: ~azure.mgmt.databoxedge.v2020_09_01.models.Sku
:param etag: The etag for the devices.
:type etag: str
:param identity: Msi identity of the resource.
:type identity: ~azure.mgmt.databoxedge.v2020_09_01.models.ResourceIdentity
:ivar kind: The kind of the device. Possible values include: "AzureDataBoxGateway",
"AzureStackEdge", "AzureStackHub", "AzureModularDataCentre".
:vartype kind: str or ~azure.mgmt.databoxedge.v2020_09_01.models.DataBoxEdgeDeviceKind
:ivar system_data: DataBoxEdge Resource.
:vartype system_data: ~azure.mgmt.databoxedge.v2020_09_01.models.SystemData
:param data_box_edge_device_status: The status of the Data Box Edge/Gateway device. Possible
values include: "ReadyToSetup", "Online", "Offline", "NeedsAttention", "Disconnected",
"PartiallyDisconnected", "Maintenance".
:type data_box_edge_device_status: str or
~azure.mgmt.databoxedge.v2020_09_01.models.DataBoxEdgeDeviceStatus
:ivar serial_number: The Serial Number of Data Box Edge/Gateway device.
:vartype serial_number: str
:ivar description: The Description of the Data Box Edge/Gateway device.
:vartype description: str
:ivar model_description: The description of the Data Box Edge/Gateway device model.
:vartype model_description: str
:ivar device_type: The type of the Data Box Edge/Gateway device. Possible values include:
"DataBoxEdgeDevice".
:vartype device_type: str or ~azure.mgmt.databoxedge.v2020_09_01.models.DeviceType
:ivar friendly_name: The Data Box Edge/Gateway device name.
:vartype friendly_name: str
:ivar culture: The Data Box Edge/Gateway device culture.
:vartype culture: str
:ivar device_model: The Data Box Edge/Gateway device model.
:vartype device_model: str
:ivar device_software_version: The Data Box Edge/Gateway device software version.
:vartype device_software_version: str
:ivar device_local_capacity: The Data Box Edge/Gateway device local capacity in MB.
:vartype device_local_capacity: long
:ivar time_zone: The Data Box Edge/Gateway device timezone.
:vartype time_zone: str
:ivar device_hcs_version: The device software version number of the device (e.g. 1.2.18105.6).
:vartype device_hcs_version: str
:ivar configured_role_types: Type of compute roles configured.
:vartype configured_role_types: list[str or
~azure.mgmt.databoxedge.v2020_09_01.models.RoleTypes]
:ivar node_count: The number of nodes in the cluster.
:vartype node_count: int
:ivar resource_move_details: The details of the move operation on this resource.
:vartype resource_move_details: ~azure.mgmt.databoxedge.v2020_09_01.models.ResourceMoveDetails
:ivar edge_profile: The details of Edge Profile for this resource.
:vartype edge_profile: ~azure.mgmt.databoxedge.v2020_09_01.models.EdgeProfile
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'kind': {'readonly': True},
'system_data': {'readonly': True},
'serial_number': {'readonly': True},
'description': {'readonly': True},
'model_description': {'readonly': True},
'device_type': {'readonly': True},
'friendly_name': {'readonly': True},
'culture': {'readonly': True},
'device_model': {'readonly': True},
'device_software_version': {'readonly': True},
'device_local_capacity': {'readonly': True},
'time_zone': {'readonly': True},
'device_hcs_version': {'readonly': True},
'configured_role_types': {'readonly': True},
'node_count': {'readonly': True},
'resource_move_details': {'readonly': True},
'edge_profile': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
'etag': {'key': 'etag', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ResourceIdentity'},
'kind': {'key': 'kind', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'data_box_edge_device_status': {'key': 'properties.dataBoxEdgeDeviceStatus', 'type': 'str'},
'serial_number': {'key': 'properties.serialNumber', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'model_description': {'key': 'properties.modelDescription', 'type': 'str'},
'device_type': {'key': 'properties.deviceType', 'type': 'str'},
'friendly_name': {'key': 'properties.friendlyName', 'type': 'str'},
'culture': {'key': 'properties.culture', 'type': 'str'},
'device_model': {'key': 'properties.deviceModel', 'type': 'str'},
'device_software_version': {'key': 'properties.deviceSoftwareVersion', 'type': 'str'},
'device_local_capacity': {'key': 'properties.deviceLocalCapacity', 'type': 'long'},
'time_zone': {'key': 'properties.timeZone', 'type': 'str'},
'device_hcs_version': {'key': 'properties.deviceHcsVersion', 'type': 'str'},
'configured_role_types': {'key': 'properties.configuredRoleTypes', 'type': '[str]'},
'node_count': {'key': 'properties.nodeCount', 'type': 'int'},
'resource_move_details': {'key': 'properties.resourceMoveDetails', 'type': 'ResourceMoveDetails'},
'edge_profile': {'key': 'properties.edgeProfile', 'type': 'EdgeProfile'},
}
def __init__(
self,
**kwargs
):
super(DataBoxEdgeDevice, self).__init__(**kwargs)
self.location = kwargs['location']
self.tags = kwargs.get('tags', None)
self.sku = kwargs.get('sku', None)
self.etag = kwargs.get('etag', None)
self.identity = kwargs.get('identity', None)
self.kind = None
self.system_data = None
self.data_box_edge_device_status = kwargs.get('data_box_edge_device_status', None)
self.serial_number = None
self.description = None
self.model_description = None
self.device_type = None
self.friendly_name = None
self.culture = None
self.device_model = None
self.device_software_version = None
self.device_local_capacity = None
self.time_zone = None
self.device_hcs_version = None
self.configured_role_types = None
self.node_count = None
self.resource_move_details = None
self.edge_profile = None
class DataBoxEdgeDeviceExtendedInfo(ARMBaseModel):
"""The extended Info of the Data Box Edge/Gateway device.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:param encryption_key_thumbprint: The digital signature of encrypted certificate.
:type encryption_key_thumbprint: str
:param encryption_key: The public part of the encryption certificate. Client uses this to
encrypt any secret.
:type encryption_key: str
:ivar resource_key: The Resource ID of the Resource.
:vartype resource_key: str
:param client_secret_store_id: The Key Vault ARM Id for client secrets.
:type client_secret_store_id: str
:param client_secret_store_url: The url to access the Client Key Vault.
:type client_secret_store_url: str
:param channel_integrity_key_name: The name of Channel Integrity Key stored in the Client Key
Vault.
:type channel_integrity_key_name: str
:param channel_integrity_key_version: The version of Channel Integrity Key stored in the Client
Key Vault.
:type channel_integrity_key_version: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'resource_key': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'encryption_key_thumbprint': {'key': 'properties.encryptionKeyThumbprint', 'type': 'str'},
'encryption_key': {'key': 'properties.encryptionKey', 'type': 'str'},
'resource_key': {'key': 'properties.resourceKey', 'type': 'str'},
'client_secret_store_id': {'key': 'properties.clientSecretStoreId', 'type': 'str'},
'client_secret_store_url': {'key': 'properties.clientSecretStoreUrl', 'type': 'str'},
'channel_integrity_key_name': {'key': 'properties.channelIntegrityKeyName', 'type': 'str'},
'channel_integrity_key_version': {'key': 'properties.channelIntegrityKeyVersion', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataBoxEdgeDeviceExtendedInfo, self).__init__(**kwargs)
self.encryption_key_thumbprint = kwargs.get('encryption_key_thumbprint', None)
self.encryption_key = kwargs.get('encryption_key', None)
self.resource_key = None
self.client_secret_store_id = kwargs.get('client_secret_store_id', None)
self.client_secret_store_url = kwargs.get('client_secret_store_url', None)
self.channel_integrity_key_name = kwargs.get('channel_integrity_key_name', None)
self.channel_integrity_key_version = kwargs.get('channel_integrity_key_version', None)
class DataBoxEdgeDeviceExtendedInfoPatch(msrest.serialization.Model):
"""The Data Box Edge/Gateway device extended info patch.
:param client_secret_store_id: The Key Vault ARM Id for client secrets.
:type client_secret_store_id: str
:param client_secret_store_url: The url to access the Client Key Vault.
:type client_secret_store_url: str
:param channel_integrity_key_name: The name for Channel Integrity Key stored in the Client Key
Vault.
:type channel_integrity_key_name: str
:param channel_integrity_key_version: The version of Channel Integrity Key stored in the Client
Key Vault.
:type channel_integrity_key_version: str
"""
_attribute_map = {
'client_secret_store_id': {'key': 'clientSecretStoreId', 'type': 'str'},
'client_secret_store_url': {'key': 'clientSecretStoreUrl', 'type': 'str'},
'channel_integrity_key_name': {'key': 'channelIntegrityKeyName', 'type': 'str'},
'channel_integrity_key_version': {'key': 'channelIntegrityKeyVersion', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataBoxEdgeDeviceExtendedInfoPatch, self).__init__(**kwargs)
self.client_secret_store_id = kwargs.get('client_secret_store_id', None)
self.client_secret_store_url = kwargs.get('client_secret_store_url', None)
self.channel_integrity_key_name = kwargs.get('channel_integrity_key_name', None)
self.channel_integrity_key_version = kwargs.get('channel_integrity_key_version', None)
class DataBoxEdgeDeviceList(msrest.serialization.Model):
"""The collection of Data Box Edge/Gateway devices.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of Data Box Edge/Gateway devices.
:vartype value: list[~azure.mgmt.databoxedge.v2020_09_01.models.DataBoxEdgeDevice]
:ivar next_link: Link to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DataBoxEdgeDevice]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataBoxEdgeDeviceList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class DataBoxEdgeDevicePatch(msrest.serialization.Model):
"""The Data Box Edge/Gateway device patch.
:param tags: A set of tags. The tags attached to the Data Box Edge/Gateway resource.
:type tags: dict[str, str]
:param identity: Msi identity of the resource.
:type identity: ~azure.mgmt.databoxedge.v2020_09_01.models.ResourceIdentity
:param edge_profile: Edge Profile property of the Data Box Edge/Gateway device.
:type edge_profile: ~azure.mgmt.databoxedge.v2020_09_01.models.EdgeProfilePatch
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'ResourceIdentity'},
'edge_profile': {'key': 'properties.edgeProfile', 'type': 'EdgeProfilePatch'},
}
def __init__(
self,
**kwargs
):
super(DataBoxEdgeDevicePatch, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.identity = kwargs.get('identity', None)
self.edge_profile = kwargs.get('edge_profile', None)
class DataBoxEdgeMoveRequest(msrest.serialization.Model):
"""Resource Move details.
All required parameters must be populated in order to send to Azure.
:param target_resource_group: Required. Target resource group ARMId.
:type target_resource_group: str
:param resources: Required. List of resources to be moved.
:type resources: list[str]
"""
_validation = {
'target_resource_group': {'required': True},
'resources': {'required': True},
}
_attribute_map = {
'target_resource_group': {'key': 'targetResourceGroup', 'type': 'str'},
'resources': {'key': 'resources', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(DataBoxEdgeMoveRequest, self).__init__(**kwargs)
self.target_resource_group = kwargs['target_resource_group']
self.resources = kwargs['resources']
class DataBoxEdgeSku(msrest.serialization.Model):
"""The Sku information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar resource_type: The type of the resource.
:vartype resource_type: str
:ivar name: The Sku name. Possible values include: "Gateway", "Edge", "TEA_1Node",
"TEA_1Node_UPS", "TEA_1Node_Heater", "TEA_1Node_UPS_Heater", "TEA_4Node_Heater",
"TEA_4Node_UPS_Heater", "TMA", "TDC", "TCA_Small", "GPU", "TCA_Large", "EdgeP_Base",
"EdgeP_High", "EdgePR_Base", "EdgePR_Base_UPS", "EdgeMR_Mini", "RCA_Small", "RCA_Large", "RDC".
:vartype name: str or ~azure.mgmt.databoxedge.v2020_09_01.models.SkuName
:ivar kind: The Sku kind.
:vartype kind: str
:ivar tier: The Sku tier. Possible values include: "Standard".
:vartype tier: str or ~azure.mgmt.databoxedge.v2020_09_01.models.SkuTier
:ivar size: The Sku size.
:vartype size: str
:ivar family: The Sku family.
:vartype family: str
:ivar locations: Availability of the Sku for the region.
:vartype locations: list[str]
:ivar api_versions: The API versions in which Sku is available.
:vartype api_versions: list[str]
:ivar location_info: Availability of the Sku for the location/zone/site.
:vartype location_info: list[~azure.mgmt.databoxedge.v2020_09_01.models.SkuLocationInfo]
:ivar costs: The pricing info of the Sku.
:vartype costs: list[~azure.mgmt.databoxedge.v2020_09_01.models.SkuCost]
:ivar signup_option: Sku can be signed up by customer or not. Possible values include: "None",
"Available".
:vartype signup_option: str or ~azure.mgmt.databoxedge.v2020_09_01.models.SkuSignupOption
:ivar version: Availability of the Sku as preview/stable. Possible values include: "Stable",
"Preview".
:vartype version: str or ~azure.mgmt.databoxedge.v2020_09_01.models.SkuVersion
:ivar availability: Availability of the Sku. Possible values include: "Available",
"Unavailable".
:vartype availability: str or ~azure.mgmt.databoxedge.v2020_09_01.models.SkuAvailability
:ivar shipment_types: List of Shipment Types supported by this SKU.
:vartype shipment_types: list[str or ~azure.mgmt.databoxedge.v2020_09_01.models.ShipmentType]
"""
_validation = {
'resource_type': {'readonly': True},
'name': {'readonly': True},
'kind': {'readonly': True},
'tier': {'readonly': True},
'size': {'readonly': True},
'family': {'readonly': True},
'locations': {'readonly': True},
'api_versions': {'readonly': True},
'location_info': {'readonly': True},
'costs': {'readonly': True},
'signup_option': {'readonly': True},
'version': {'readonly': True},
'availability': {'readonly': True},
'shipment_types': {'readonly': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'size': {'key': 'size', 'type': 'str'},
'family': {'key': 'family', 'type': 'str'},
'locations': {'key': 'locations', 'type': '[str]'},
'api_versions': {'key': 'apiVersions', 'type': '[str]'},
'location_info': {'key': 'locationInfo', 'type': '[SkuLocationInfo]'},
'costs': {'key': 'costs', 'type': '[SkuCost]'},
'signup_option': {'key': 'signupOption', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'availability': {'key': 'availability', 'type': 'str'},
'shipment_types': {'key': 'shipmentTypes', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(DataBoxEdgeSku, self).__init__(**kwargs)
self.resource_type = None
self.name = None
self.kind = None
self.tier = None
self.size = None
self.family = None
self.locations = None
self.api_versions = None
self.location_info = None
self.costs = None
self.signup_option = None
self.version = None
self.availability = None
self.shipment_types = None
class DataBoxEdgeSkuList(msrest.serialization.Model):
"""List of SKU Information objects.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of ResourceType Sku.
:vartype value: list[~azure.mgmt.databoxedge.v2020_09_01.models.DataBoxEdgeSku]
:ivar next_link: Links to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DataBoxEdgeSku]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataBoxEdgeSkuList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class DCAccessCode(msrest.serialization.Model):
"""DC Access code in the case of Self Managed Shipping.
:param auth_code: DCAccess Code for the Self Managed shipment.
:type auth_code: str
"""
_attribute_map = {
'auth_code': {'key': 'properties.authCode', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DCAccessCode, self).__init__(**kwargs)
self.auth_code = kwargs.get('auth_code', None)
class EdgeProfile(msrest.serialization.Model):
"""Details about Edge Profile for the resource.
:param subscription: Edge Profile Subscription.
:type subscription: ~azure.mgmt.databoxedge.v2020_09_01.models.EdgeProfileSubscription
"""
_attribute_map = {
'subscription': {'key': 'subscription', 'type': 'EdgeProfileSubscription'},
}
def __init__(
self,
**kwargs
):
super(EdgeProfile, self).__init__(**kwargs)
self.subscription = kwargs.get('subscription', None)
class EdgeProfilePatch(msrest.serialization.Model):
"""The Data Box Edge/Gateway Edge Profile patch.
:param subscription: The Data Box Edge/Gateway Edge Profile Subscription patch.
:type subscription: ~azure.mgmt.databoxedge.v2020_09_01.models.EdgeProfileSubscriptionPatch
"""
_attribute_map = {
'subscription': {'key': 'subscription', 'type': 'EdgeProfileSubscriptionPatch'},
}
def __init__(
self,
**kwargs
):
super(EdgeProfilePatch, self).__init__(**kwargs)
self.subscription = kwargs.get('subscription', None)
class EdgeProfileSubscription(msrest.serialization.Model):
"""Subscription details for the Edge Profile.
:param registration_id: Edge Subscription Registration ID.
:type registration_id: str
:param id: ARM ID of the subscription.
:type id: str
:param state: Possible values include: "Registered", "Warned", "Suspended", "Deleted",
"Unregistered".
:type state: str or ~azure.mgmt.databoxedge.v2020_09_01.models.SubscriptionState
:param registration_date:
:type registration_date: str
:param subscription_id:
:type subscription_id: str
:param tenant_id:
:type tenant_id: str
:param location_placement_id:
:type location_placement_id: str
:param quota_id:
:type quota_id: str
:param serialized_details:
:type serialized_details: str
:param registered_features:
:type registered_features:
list[~azure.mgmt.databoxedge.v2020_09_01.models.SubscriptionRegisteredFeatures]
"""
_attribute_map = {
'registration_id': {'key': 'registrationId', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'registration_date': {'key': 'registrationDate', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'tenant_id': {'key': 'properties.tenantId', 'type': 'str'},
'location_placement_id': {'key': 'properties.locationPlacementId', 'type': 'str'},
'quota_id': {'key': 'properties.quotaId', 'type': 'str'},
'serialized_details': {'key': 'properties.serializedDetails', 'type': 'str'},
'registered_features': {'key': 'properties.registeredFeatures', 'type': '[SubscriptionRegisteredFeatures]'},
}
def __init__(
self,
**kwargs
):
super(EdgeProfileSubscription, self).__init__(**kwargs)
self.registration_id = kwargs.get('registration_id', None)
self.id = kwargs.get('id', None)
self.state = kwargs.get('state', None)
self.registration_date = kwargs.get('registration_date', None)
self.subscription_id = kwargs.get('subscription_id', None)
self.tenant_id = kwargs.get('tenant_id', None)
self.location_placement_id = kwargs.get('location_placement_id', None)
self.quota_id = kwargs.get('quota_id', None)
self.serialized_details = kwargs.get('serialized_details', None)
self.registered_features = kwargs.get('registered_features', None)
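# Illustrative usage (editorial example, not part of the generated models; the IDs below are
# hypothetical placeholders):
#
#     profile = EdgeProfile(
#         subscription=EdgeProfileSubscription(
#             registration_id="<registration GUID>",
#             subscription_id="<subscription GUID>",
#         ),
#     )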
class EdgeProfileSubscriptionPatch(msrest.serialization.Model):
"""The Data Box Edge/Gateway Edge Profile Subscription patch.
:param id: The path ID that uniquely identifies the subscription of the edge profile.
:type id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EdgeProfileSubscriptionPatch, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
class EtcdInfo(msrest.serialization.Model):
"""Etcd configuration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: Etcd type.
:vartype type: str
:ivar version: Etcd version.
:vartype version: str
"""
_validation = {
'type': {'readonly': True},
'version': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EtcdInfo, self).__init__(**kwargs)
self.type = None
self.version = None
class Trigger(ARMBaseModel):
"""Trigger details.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: FileEventTrigger, PeriodicTimerEventTrigger.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:ivar system_data: Metadata pertaining to creation and last modification of the trigger.
:vartype system_data: ~azure.mgmt.databoxedge.v2020_09_01.models.SystemData
:param kind: Required. Trigger Kind. Constant filled by server. Possible values include:
"FileEvent", "PeriodicTimerEvent".
:type kind: str or ~azure.mgmt.databoxedge.v2020_09_01.models.TriggerEventType
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'kind': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'kind': {'key': 'kind', 'type': 'str'},
}
_subtype_map = {
'kind': {'FileEvent': 'FileEventTrigger', 'PeriodicTimerEvent': 'PeriodicTimerEventTrigger'}
}
def __init__(
self,
**kwargs
):
super(Trigger, self).__init__(**kwargs)
self.system_data = None
self.kind = 'Trigger' # type: str
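# Editorial note (illustrative only): Trigger is a polymorphic base; msrest selects the
# concrete subclass from the ``kind`` discriminator via ``_subtype_map``. Assuming the
# standard msrest ``Model.deserialize`` behavior, a payload with ``"kind": "FileEvent"``
# should come back as a FileEventTrigger (the payload values below are hypothetical):
#
#     payload = {
#         "kind": "FileEvent",
#         "properties": {
#             "sourceInfo": {"shareId": "<share ARM ID>"},
#             "sinkInfo": {"roleId": "<role ARM ID>"},
#         },
#     }
#     trigger = Trigger.deserialize(payload)  # expected to be a FileEventTrigger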
class FileEventTrigger(Trigger):
"""Trigger details.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:ivar system_data: Metadata pertaining to creation and last modification of the trigger.
:vartype system_data: ~azure.mgmt.databoxedge.v2020_09_01.models.SystemData
:param kind: Required. Trigger Kind. Constant filled by server. Possible values include:
"FileEvent", "PeriodicTimerEvent".
:type kind: str or ~azure.mgmt.databoxedge.v2020_09_01.models.TriggerEventType
:param source_info: Required. File event source details.
:type source_info: ~azure.mgmt.databoxedge.v2020_09_01.models.FileSourceInfo
:param sink_info: Required. Role sink info.
:type sink_info: ~azure.mgmt.databoxedge.v2020_09_01.models.RoleSinkInfo
:param custom_context_tag: A custom context tag typically used to correlate the trigger against
its usage. For example, if a periodic timer trigger is intended for certain specific IoT
modules in the device, the tag can be the name or the image URL of the module.
:type custom_context_tag: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'kind': {'required': True},
'source_info': {'required': True},
'sink_info': {'required': True},
'custom_context_tag': {'max_length': 192, 'min_length': 0},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'kind': {'key': 'kind', 'type': 'str'},
'source_info': {'key': 'properties.sourceInfo', 'type': 'FileSourceInfo'},
'sink_info': {'key': 'properties.sinkInfo', 'type': 'RoleSinkInfo'},
'custom_context_tag': {'key': 'properties.customContextTag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(FileEventTrigger, self).__init__(**kwargs)
self.kind = 'FileEvent' # type: str
self.source_info = kwargs['source_info']
self.sink_info = kwargs['sink_info']
self.custom_context_tag = kwargs.get('custom_context_tag', None)
class FileSourceInfo(msrest.serialization.Model):
"""File source details.
All required parameters must be populated in order to send to Azure.
:param share_id: Required. File share ID.
:type share_id: str
"""
_validation = {
'share_id': {'required': True},
}
_attribute_map = {
'share_id': {'key': 'shareId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(FileSourceInfo, self).__init__(**kwargs)
self.share_id = kwargs['share_id']
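# Illustrative construction of a FileEventTrigger (editorial example; the ARM IDs are
# hypothetical, and RoleSinkInfo is assumed to be the role-sink model defined elsewhere in
# this module with a required role_id):
#
#     trigger = FileEventTrigger(
#         source_info=FileSourceInfo(share_id="<share ARM ID>"),
#         sink_info=RoleSinkInfo(role_id="<role ARM ID>"),
#         custom_context_tag="my-module",
#     )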
class GenerateCertResponse(msrest.serialization.Model):
"""Used in activation key generation flow.
:param public_key: Gets or sets the base64-encoded certificate raw data; this is the public
part that needs to be uploaded to the cert vault.
:type public_key: str
:param private_key: Gets or sets base64 encoded private part of the certificate,
needed to form the activation key.
:type private_key: str
:param expiry_time_in_utc: Gets or sets expiry time in UTC.
:type expiry_time_in_utc: str
"""
_attribute_map = {
'public_key': {'key': 'publicKey', 'type': 'str'},
'private_key': {'key': 'privateKey', 'type': 'str'},
'expiry_time_in_utc': {'key': 'expiryTimeInUTC', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(GenerateCertResponse, self).__init__(**kwargs)
self.public_key = kwargs.get('public_key', None)
self.private_key = kwargs.get('private_key', None)
self.expiry_time_in_utc = kwargs.get('expiry_time_in_utc', None)
class ImageRepositoryCredential(msrest.serialization.Model):
"""Image repository credential.
All required parameters must be populated in order to send to Azure.
:param image_repository_url: Required. Image repository URL (e.g. mcr.microsoft.com).
:type image_repository_url: str
:param user_name: Required. Repository user name.
:type user_name: str
:param password: Repository user password.
:type password: ~azure.mgmt.databoxedge.v2020_09_01.models.AsymmetricEncryptedSecret
"""
_validation = {
'image_repository_url': {'required': True},
'user_name': {'required': True},
}
_attribute_map = {
'image_repository_url': {'key': 'imageRepositoryUrl', 'type': 'str'},
'user_name': {'key': 'userName', 'type': 'str'},
'password': {'key': 'password', 'type': 'AsymmetricEncryptedSecret'},
}
def __init__(
self,
**kwargs
):
super(ImageRepositoryCredential, self).__init__(**kwargs)
self.image_repository_url = kwargs['image_repository_url']
self.user_name = kwargs['user_name']
self.password = kwargs.get('password', None)
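# Illustrative usage (editorial example; values are hypothetical, and AsymmetricEncryptedSecret
# is assumed to be the secret model defined elsewhere in this module):
#
#     credential = ImageRepositoryCredential(
#         image_repository_url="mcr.microsoft.com",
#         user_name="registry-user",
#         password=AsymmetricEncryptedSecret(
#             value="<encrypted value>",
#             encryption_algorithm="AES256",
#         ),
#     )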
class IoTAddon(Addon):
"""IoT Addon.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:param kind: Required. Addon type. Constant filled by server. Possible values include:
"IotEdge", "ArcForKubernetes".
:type kind: str or ~azure.mgmt.databoxedge.v2020_09_01.models.AddonType
:ivar system_data: Metadata pertaining to creation and last modification of the addon.
:vartype system_data: ~azure.mgmt.databoxedge.v2020_09_01.models.SystemData
:param io_t_device_details: Required. IoT device metadata to which the appliance needs to be
connected.
:type io_t_device_details: ~azure.mgmt.databoxedge.v2020_09_01.models.IoTDeviceInfo
:param io_t_edge_device_details: Required. IoT edge device to which the IoT Addon needs to be
configured.
:type io_t_edge_device_details: ~azure.mgmt.databoxedge.v2020_09_01.models.IoTDeviceInfo
:ivar version: Version of IoT running on the appliance.
:vartype version: str
:ivar host_platform: Host OS supported by the IoT addon. Possible values include: "Windows",
"Linux".
:vartype host_platform: str or ~azure.mgmt.databoxedge.v2020_09_01.models.PlatformType
:ivar host_platform_type: Platform where the runtime is hosted. Possible values include:
"KubernetesCluster", "LinuxVM".
:vartype host_platform_type: str or ~azure.mgmt.databoxedge.v2020_09_01.models.HostPlatformType
:ivar provisioning_state: Addon Provisioning State. Possible values include: "Invalid",
"Creating", "Created", "Updating", "Reconfiguring", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.databoxedge.v2020_09_01.models.AddonState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'kind': {'required': True},
'system_data': {'readonly': True},
'io_t_device_details': {'required': True},
'io_t_edge_device_details': {'required': True},
'version': {'readonly': True},
'host_platform': {'readonly': True},
'host_platform_type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'io_t_device_details': {'key': 'properties.ioTDeviceDetails', 'type': 'IoTDeviceInfo'},
'io_t_edge_device_details': {'key': 'properties.ioTEdgeDeviceDetails', 'type': 'IoTDeviceInfo'},
'version': {'key': 'properties.version', 'type': 'str'},
'host_platform': {'key': 'properties.hostPlatform', 'type': 'str'},
'host_platform_type': {'key': 'properties.hostPlatformType', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IoTAddon, self).__init__(**kwargs)
self.kind = 'IotEdge' # type: str
self.io_t_device_details = kwargs['io_t_device_details']
self.io_t_edge_device_details = kwargs['io_t_edge_device_details']
self.version = None
self.host_platform = None
self.host_platform_type = None
self.provisioning_state = None
class IoTDeviceInfo(msrest.serialization.Model):
"""Metadata of IoT device/IoT Edge device to be configured.
All required parameters must be populated in order to send to Azure.
:param device_id: Required. ID of the IoT device/edge device.
:type device_id: str
:param io_t_host_hub: Required. Host name for the IoT hub associated with the device.
:type io_t_host_hub: str
:param io_t_host_hub_id: ID of the IoT hub associated with the device.
:type io_t_host_hub_id: str
:param authentication: Encrypted IoT device/IoT edge device connection string.
:type authentication: ~azure.mgmt.databoxedge.v2020_09_01.models.Authentication
"""
_validation = {
'device_id': {'required': True},
'io_t_host_hub': {'required': True},
}
_attribute_map = {
'device_id': {'key': 'deviceId', 'type': 'str'},
'io_t_host_hub': {'key': 'ioTHostHub', 'type': 'str'},
'io_t_host_hub_id': {'key': 'ioTHostHubId', 'type': 'str'},
'authentication': {'key': 'authentication', 'type': 'Authentication'},
}
def __init__(
self,
**kwargs
):
super(IoTDeviceInfo, self).__init__(**kwargs)
self.device_id = kwargs['device_id']
self.io_t_host_hub = kwargs['io_t_host_hub']
self.io_t_host_hub_id = kwargs.get('io_t_host_hub_id', None)
self.authentication = kwargs.get('authentication', None)
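# Illustrative usage (editorial example; the device and hub names are hypothetical):
#
#     device_info = IoTDeviceInfo(
#         device_id="edge-device-01",
#         io_t_host_hub="my-hub.azure-devices.net",
#     )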
class IoTEdgeAgentInfo(msrest.serialization.Model):
"""IoT edge agent details is optional, this will be used for download system Agent module while bootstrapping IoT Role if specified.
All required parameters must be populated in order to send to Azure.
:param image_name: Required. Name of the IoT edge agent image.
:type image_name: str
:param tag: Required. Image Tag.
:type tag: str
:param image_repository: Image repository details.
:type image_repository: ~azure.mgmt.databoxedge.v2020_09_01.models.ImageRepositoryCredential
"""
_validation = {
'image_name': {'required': True},
'tag': {'required': True},
}
_attribute_map = {
'image_name': {'key': 'imageName', 'type': 'str'},
'tag': {'key': 'tag', 'type': 'str'},
'image_repository': {'key': 'imageRepository', 'type': 'ImageRepositoryCredential'},
}
def __init__(
self,
**kwargs
):
super(IoTEdgeAgentInfo, self).__init__(**kwargs)
self.image_name = kwargs['image_name']
self.tag = kwargs['tag']
self.image_repository = kwargs.get('image_repository', None)
class IoTRole(Role):
"""Compute role.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:param kind: Required. Role type. Constant filled by server. Possible values include: "IOT",
"ASA", "Functions", "Cognitive", "MEC", "CloudEdgeManagement", "Kubernetes".
:type kind: str or ~azure.mgmt.databoxedge.v2020_09_01.models.RoleTypes
:ivar system_data: Metadata pertaining to creation and last modification of the role.
:vartype system_data: ~azure.mgmt.databoxedge.v2020_09_01.models.SystemData
:param host_platform: Host OS supported by the IoT role. Possible values include: "Windows",
"Linux".
:type host_platform: str or ~azure.mgmt.databoxedge.v2020_09_01.models.PlatformType
:param io_t_device_details: IoT device metadata to which the Data Box Edge device needs to be
connected.
:type io_t_device_details: ~azure.mgmt.databoxedge.v2020_09_01.models.IoTDeviceInfo
:param io_t_edge_device_details: IoT edge device to which the IoT role needs to be configured.
:type io_t_edge_device_details: ~azure.mgmt.databoxedge.v2020_09_01.models.IoTDeviceInfo
:param share_mappings: Mount points of shares in role(s).
:type share_mappings: list[~azure.mgmt.databoxedge.v2020_09_01.models.MountPointMap]
:param io_t_edge_agent_info: IoT edge agent details to download the agent and bootstrap the IoT
runtime.
:type io_t_edge_agent_info: ~azure.mgmt.databoxedge.v2020_09_01.models.IoTEdgeAgentInfo
:ivar host_platform_type: Platform where the Iot runtime is hosted. Possible values include:
"KubernetesCluster", "LinuxVM".
:vartype host_platform_type: str or ~azure.mgmt.databoxedge.v2020_09_01.models.HostPlatformType
:param compute_resource: Resource allocation.
:type compute_resource: ~azure.mgmt.databoxedge.v2020_09_01.models.ComputeResource
:param role_status: Role status. Possible values include: "Enabled", "Disabled".
:type role_status: str or ~azure.mgmt.databoxedge.v2020_09_01.models.RoleStatus
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'kind': {'required': True},
'system_data': {'readonly': True},
'host_platform_type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'host_platform': {'key': 'properties.hostPlatform', 'type': 'str'},
'io_t_device_details': {'key': 'properties.ioTDeviceDetails', 'type': 'IoTDeviceInfo'},
'io_t_edge_device_details': {'key': 'properties.ioTEdgeDeviceDetails', 'type': 'IoTDeviceInfo'},
'share_mappings': {'key': 'properties.shareMappings', 'type': '[MountPointMap]'},
'io_t_edge_agent_info': {'key': 'properties.ioTEdgeAgentInfo', 'type': 'IoTEdgeAgentInfo'},
'host_platform_type': {'key': 'properties.hostPlatformType', 'type': 'str'},
'compute_resource': {'key': 'properties.computeResource', 'type': 'ComputeResource'},
'role_status': {'key': 'properties.roleStatus', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IoTRole, self).__init__(**kwargs)
self.kind = 'IOT' # type: str
self.host_platform = kwargs.get('host_platform', None)
self.io_t_device_details = kwargs.get('io_t_device_details', None)
self.io_t_edge_device_details = kwargs.get('io_t_edge_device_details', None)
self.share_mappings = kwargs.get('share_mappings', None)
self.io_t_edge_agent_info = kwargs.get('io_t_edge_agent_info', None)
self.host_platform_type = None
self.compute_resource = kwargs.get('compute_resource', None)
self.role_status = kwargs.get('role_status', None)
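# Illustrative construction of an IoTRole (editorial example; all values are hypothetical):
#
#     role = IoTRole(
#         host_platform="Linux",
#         io_t_device_details=IoTDeviceInfo(
#             device_id="edge-device-01", io_t_host_hub="my-hub.azure-devices.net"),
#         io_t_edge_device_details=IoTDeviceInfo(
#             device_id="edge-device-01-edge", io_t_host_hub="my-hub.azure-devices.net"),
#         role_status="Enabled",
#     )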
class Ipv4Config(msrest.serialization.Model):
"""Details related to the IPv4 address configuration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar ip_address: The IPv4 address of the network adapter.
:vartype ip_address: str
:ivar subnet: The IPv4 subnet of the network adapter.
:vartype subnet: str
:ivar gateway: The IPv4 gateway of the network adapter.
:vartype gateway: str
"""
_validation = {
'ip_address': {'readonly': True},
'subnet': {'readonly': True},
'gateway': {'readonly': True},
}
_attribute_map = {
'ip_address': {'key': 'ipAddress', 'type': 'str'},
'subnet': {'key': 'subnet', 'type': 'str'},
'gateway': {'key': 'gateway', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Ipv4Config, self).__init__(**kwargs)
self.ip_address = None
self.subnet = None
self.gateway = None
class Ipv6Config(msrest.serialization.Model):
"""Details related to the IPv6 address configuration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar ip_address: The IPv6 address of the network adapter.
:vartype ip_address: str
:ivar prefix_length: The IPv6 prefix of the network adapter.
:vartype prefix_length: int
:ivar gateway: The IPv6 gateway of the network adapter.
:vartype gateway: str
"""
_validation = {
'ip_address': {'readonly': True},
'prefix_length': {'readonly': True},
'gateway': {'readonly': True},
}
_attribute_map = {
'ip_address': {'key': 'ipAddress', 'type': 'str'},
'prefix_length': {'key': 'prefixLength', 'type': 'int'},
'gateway': {'key': 'gateway', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Ipv6Config, self).__init__(**kwargs)
self.ip_address = None
self.prefix_length = None
self.gateway = None
class Job(msrest.serialization.Model):
"""A device job.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The name of the object.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:ivar status: The current status of the job. Possible values include: "Invalid", "Running",
"Succeeded", "Failed", "Canceled", "Paused", "Scheduled".
:vartype status: str or ~azure.mgmt.databoxedge.v2020_09_01.models.JobStatus
:ivar start_time: The UTC date and time at which the job started.
:vartype start_time: ~datetime.datetime
:ivar end_time: The UTC date and time at which the job completed.
:vartype end_time: ~datetime.datetime
:ivar percent_complete: The percentage of the job that is complete.
:vartype percent_complete: int
:ivar error: The error details.
:vartype error: ~azure.mgmt.databoxedge.v2020_09_01.models.JobErrorDetails
:ivar job_type: The type of the job. Possible values include: "Invalid", "ScanForUpdates",
"DownloadUpdates", "InstallUpdates", "RefreshShare", "RefreshContainer", "Backup", "Restore",
"TriggerSupportPackage".
:vartype job_type: str or ~azure.mgmt.databoxedge.v2020_09_01.models.JobType
:ivar current_stage: Current stage of the update operation. Possible values include: "Unknown",
"Initial", "ScanStarted", "ScanComplete", "ScanFailed", "DownloadStarted", "DownloadComplete",
"DownloadFailed", "InstallStarted", "InstallComplete", "InstallFailed", "RebootInitiated",
"Success", "Failure", "RescanStarted", "RescanComplete", "RescanFailed".
:vartype current_stage: str or ~azure.mgmt.databoxedge.v2020_09_01.models.UpdateOperationStage
:ivar download_progress: The download progress.
:vartype download_progress: ~azure.mgmt.databoxedge.v2020_09_01.models.UpdateDownloadProgress
:ivar install_progress: The install progress.
:vartype install_progress: ~azure.mgmt.databoxedge.v2020_09_01.models.UpdateInstallProgress
:ivar total_refresh_errors: Total number of errors encountered during the refresh process.
:vartype total_refresh_errors: int
:ivar error_manifest_file: Local share/remote container relative path to the error manifest
file of the refresh.
:vartype error_manifest_file: str
:ivar refreshed_entity_id: ARM ID of the entity that was refreshed.
:vartype refreshed_entity_id: str
:param folder: If only subfolders need to be refreshed, then the subfolder path inside the
share or container. (The path is empty if there are no subfolders.)
:type folder: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'status': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'percent_complete': {'readonly': True},
'error': {'readonly': True},
'job_type': {'readonly': True},
'current_stage': {'readonly': True},
'download_progress': {'readonly': True},
'install_progress': {'readonly': True},
'total_refresh_errors': {'readonly': True},
'error_manifest_file': {'readonly': True},
'refreshed_entity_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'percent_complete': {'key': 'percentComplete', 'type': 'int'},
'error': {'key': 'error', 'type': 'JobErrorDetails'},
'job_type': {'key': 'properties.jobType', 'type': 'str'},
'current_stage': {'key': 'properties.currentStage', 'type': 'str'},
'download_progress': {'key': 'properties.downloadProgress', 'type': 'UpdateDownloadProgress'},
'install_progress': {'key': 'properties.installProgress', 'type': 'UpdateInstallProgress'},
'total_refresh_errors': {'key': 'properties.totalRefreshErrors', 'type': 'int'},
'error_manifest_file': {'key': 'properties.errorManifestFile', 'type': 'str'},
'refreshed_entity_id': {'key': 'properties.refreshedEntityId', 'type': 'str'},
'folder': {'key': 'properties.folder', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Job, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.status = None
self.start_time = None
self.end_time = None
self.percent_complete = None
self.error = None
self.job_type = None
self.current_stage = None
self.download_progress = None
self.install_progress = None
self.total_refresh_errors = None
self.error_manifest_file = None
self.refreshed_entity_id = None
self.folder = kwargs.get('folder', None)
class JobErrorDetails(msrest.serialization.Model):
"""The job error information containing the list of job errors.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar error_details: The error details.
:vartype error_details: list[~azure.mgmt.databoxedge.v2020_09_01.models.JobErrorItem]
:ivar code: The code intended for programmatic access.
:vartype code: str
:ivar message: The message that describes the error in detail.
:vartype message: str
"""
_validation = {
'error_details': {'readonly': True},
'code': {'readonly': True},
'message': {'readonly': True},
}
_attribute_map = {
'error_details': {'key': 'errorDetails', 'type': '[JobErrorItem]'},
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobErrorDetails, self).__init__(**kwargs)
self.error_details = None
self.code = None
self.message = None
class JobErrorItem(msrest.serialization.Model):
"""The job error items.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar recommendations: The recommended actions.
:vartype recommendations: list[str]
:ivar code: The code intended for programmatic access.
:vartype code: str
:ivar message: The message that describes the error in detail.
:vartype message: str
"""
_validation = {
'recommendations': {'readonly': True},
'code': {'readonly': True},
'message': {'readonly': True},
}
_attribute_map = {
'recommendations': {'key': 'recommendations', 'type': '[str]'},
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobErrorItem, self).__init__(**kwargs)
self.recommendations = None
self.code = None
self.message = None
class KubernetesClusterInfo(msrest.serialization.Model):
"""Kubernetes cluster configuration.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar etcd_info: Etcd configuration.
:vartype etcd_info: ~azure.mgmt.databoxedge.v2020_09_01.models.EtcdInfo
:ivar nodes: Kubernetes cluster nodes.
:vartype nodes: list[~azure.mgmt.databoxedge.v2020_09_01.models.NodeInfo]
:param version: Required. Kubernetes cluster version.
:type version: str
"""
_validation = {
'etcd_info': {'readonly': True},
'nodes': {'readonly': True},
'version': {'required': True},
}
_attribute_map = {
'etcd_info': {'key': 'etcdInfo', 'type': 'EtcdInfo'},
'nodes': {'key': 'nodes', 'type': '[NodeInfo]'},
'version': {'key': 'version', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(KubernetesClusterInfo, self).__init__(**kwargs)
self.etcd_info = None
self.nodes = None
self.version = kwargs['version']
class KubernetesIPConfiguration(msrest.serialization.Model):
"""Kubernetes node IP configuration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar port: Port of the Kubernetes node.
:vartype port: str
:param ip_address: IP address of the Kubernetes node.
:type ip_address: str
"""
_validation = {
'port': {'readonly': True},
}
_attribute_map = {
'port': {'key': 'port', 'type': 'str'},
'ip_address': {'key': 'ipAddress', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(KubernetesIPConfiguration, self).__init__(**kwargs)
self.port = None
self.ip_address = kwargs.get('ip_address', None)
class KubernetesRole(Role):
"""Kubernetes role.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:param kind: Required. Role type. Constant filled by server. Possible values include: "IOT",
"ASA", "Functions", "Cognitive", "MEC", "CloudEdgeManagement", "Kubernetes".
:type kind: str or ~azure.mgmt.databoxedge.v2020_09_01.models.RoleTypes
:ivar system_data: Metadata pertaining to creation and last modification of the role.
:vartype system_data: ~azure.mgmt.databoxedge.v2020_09_01.models.SystemData
:param host_platform: Host OS supported by the Kubernetes role. Possible values include:
"Windows", "Linux".
:type host_platform: str or ~azure.mgmt.databoxedge.v2020_09_01.models.PlatformType
:ivar provisioning_state: State of Kubernetes deployment. Possible values include: "Invalid",
"Creating", "Created", "Updating", "Reconfiguring", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.databoxedge.v2020_09_01.models.KubernetesState
:ivar host_platform_type: Platform where the runtime is hosted. Possible values include:
"KubernetesCluster", "LinuxVM".
:vartype host_platform_type: str or ~azure.mgmt.databoxedge.v2020_09_01.models.HostPlatformType
:param kubernetes_cluster_info: Kubernetes cluster configuration.
:type kubernetes_cluster_info: ~azure.mgmt.databoxedge.v2020_09_01.models.KubernetesClusterInfo
:param kubernetes_role_resources: Kubernetes role resources.
:type kubernetes_role_resources:
~azure.mgmt.databoxedge.v2020_09_01.models.KubernetesRoleResources
:param role_status: Role status. Possible values include: "Enabled", "Disabled".
:type role_status: str or ~azure.mgmt.databoxedge.v2020_09_01.models.RoleStatus
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'kind': {'required': True},
'system_data': {'readonly': True},
'provisioning_state': {'readonly': True},
'host_platform_type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'host_platform': {'key': 'properties.hostPlatform', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'host_platform_type': {'key': 'properties.hostPlatformType', 'type': 'str'},
'kubernetes_cluster_info': {'key': 'properties.kubernetesClusterInfo', 'type': 'KubernetesClusterInfo'},
'kubernetes_role_resources': {'key': 'properties.kubernetesRoleResources', 'type': 'KubernetesRoleResources'},
'role_status': {'key': 'properties.roleStatus', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(KubernetesRole, self).__init__(**kwargs)
self.kind = 'Kubernetes' # type: str
self.host_platform = kwargs.get('host_platform', None)
self.provisioning_state = None
self.host_platform_type = None
self.kubernetes_cluster_info = kwargs.get('kubernetes_cluster_info', None)
self.kubernetes_role_resources = kwargs.get('kubernetes_role_resources', None)
self.role_status = kwargs.get('role_status', None)
class KubernetesRoleCompute(msrest.serialization.Model):
"""Kubernetes role compute resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param vm_profile: Required. VM profile.
:type vm_profile: str
:ivar memory_in_bytes: Memory in bytes.
:vartype memory_in_bytes: long
:ivar processor_count: Processor count.
:vartype processor_count: int
"""
_validation = {
'vm_profile': {'required': True},
'memory_in_bytes': {'readonly': True},
'processor_count': {'readonly': True},
}
_attribute_map = {
'vm_profile': {'key': 'vmProfile', 'type': 'str'},
'memory_in_bytes': {'key': 'memoryInBytes', 'type': 'long'},
'processor_count': {'key': 'processorCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(KubernetesRoleCompute, self).__init__(**kwargs)
self.vm_profile = kwargs['vm_profile']
self.memory_in_bytes = None
self.processor_count = None
class KubernetesRoleNetwork(msrest.serialization.Model):
"""Kubernetes role network resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar cni_config: Cni configuration.
:vartype cni_config: ~azure.mgmt.databoxedge.v2020_09_01.models.CniConfig
:ivar load_balancer_config: Load balancer configuration.
:vartype load_balancer_config: ~azure.mgmt.databoxedge.v2020_09_01.models.LoadBalancerConfig
"""
_validation = {
'cni_config': {'readonly': True},
'load_balancer_config': {'readonly': True},
}
_attribute_map = {
'cni_config': {'key': 'cniConfig', 'type': 'CniConfig'},
'load_balancer_config': {'key': 'loadBalancerConfig', 'type': 'LoadBalancerConfig'},
}
def __init__(
self,
**kwargs
):
super(KubernetesRoleNetwork, self).__init__(**kwargs)
self.cni_config = None
self.load_balancer_config = None
class KubernetesRoleResources(msrest.serialization.Model):
"""Kubernetes role resources.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param storage: Kubernetes role storage resource.
:type storage: ~azure.mgmt.databoxedge.v2020_09_01.models.KubernetesRoleStorage
:param compute: Required. Kubernetes role compute resource.
:type compute: ~azure.mgmt.databoxedge.v2020_09_01.models.KubernetesRoleCompute
:ivar network: Kubernetes role network resource.
:vartype network: ~azure.mgmt.databoxedge.v2020_09_01.models.KubernetesRoleNetwork
"""
_validation = {
'compute': {'required': True},
'network': {'readonly': True},
}
_attribute_map = {
'storage': {'key': 'storage', 'type': 'KubernetesRoleStorage'},
'compute': {'key': 'compute', 'type': 'KubernetesRoleCompute'},
'network': {'key': 'network', 'type': 'KubernetesRoleNetwork'},
}
def __init__(
self,
**kwargs
):
super(KubernetesRoleResources, self).__init__(**kwargs)
self.storage = kwargs.get('storage', None)
self.compute = kwargs['compute']
self.network = None
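# Illustrative construction of a KubernetesRole with its required resources (editorial
# example; the version and VM profile values are hypothetical):
#
#     k8s_role = KubernetesRole(
#         host_platform="Linux",
#         kubernetes_cluster_info=KubernetesClusterInfo(version="<cluster version>"),
#         kubernetes_role_resources=KubernetesRoleResources(
#             compute=KubernetesRoleCompute(vm_profile="<vm profile>"),
#         ),
#         role_status="Enabled",
#     )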
class KubernetesRoleStorage(msrest.serialization.Model):
"""Kubernetes role storage resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar storage_classes: Kubernetes storage class info.
:vartype storage_classes:
list[~azure.mgmt.databoxedge.v2020_09_01.models.KubernetesRoleStorageClassInfo]
:param endpoints: Mount points of shares in role(s).
:type endpoints: list[~azure.mgmt.databoxedge.v2020_09_01.models.MountPointMap]
"""
_validation = {
'storage_classes': {'readonly': True},
}
_attribute_map = {
'storage_classes': {'key': 'storageClasses', 'type': '[KubernetesRoleStorageClassInfo]'},
'endpoints': {'key': 'endpoints', 'type': '[MountPointMap]'},
}
def __init__(
self,
**kwargs
):
super(KubernetesRoleStorage, self).__init__(**kwargs)
self.storage_classes = None
self.endpoints = kwargs.get('endpoints', None)
class KubernetesRoleStorageClassInfo(msrest.serialization.Model):
"""Kubernetes storage class info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Storage class name.
:vartype name: str
:ivar type: Storage class type.
:vartype type: str
:ivar posix_compliant: Whether the provisioned storage is POSIX compliant. Possible values include:
"Invalid", "Enabled", "Disabled".
:vartype posix_compliant: str or
~azure.mgmt.databoxedge.v2020_09_01.models.PosixComplianceStatus
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'posix_compliant': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'posix_compliant': {'key': 'posixCompliant', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(KubernetesRoleStorageClassInfo, self).__init__(**kwargs)
self.name = None
self.type = None
self.posix_compliant = None
class LoadBalancerConfig(msrest.serialization.Model):
"""Load balancer configuration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: Load balancer type.
:vartype type: str
:ivar version: Load balancer version.
:vartype version: str
"""
_validation = {
'type': {'readonly': True},
'version': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LoadBalancerConfig, self).__init__(**kwargs)
self.type = None
self.version = None
class MECRole(Role):
"""MEC role.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:param kind: Required. Role type. Constant filled by server. Possible values include: "IOT",
"ASA", "Functions", "Cognitive", "MEC", "CloudEdgeManagement", "Kubernetes".
:type kind: str or ~azure.mgmt.databoxedge.v2020_09_01.models.RoleTypes
:ivar system_data: Metadata pertaining to creation and last modification of the role.
:vartype system_data: ~azure.mgmt.databoxedge.v2020_09_01.models.SystemData
:param connection_string: Activation key of the MEC.
:type connection_string: ~azure.mgmt.databoxedge.v2020_09_01.models.AsymmetricEncryptedSecret
:param role_status: Role status. Possible values include: "Enabled", "Disabled".
:type role_status: str or ~azure.mgmt.databoxedge.v2020_09_01.models.RoleStatus
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'kind': {'required': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'connection_string': {'key': 'properties.connectionString', 'type': 'AsymmetricEncryptedSecret'},
'role_status': {'key': 'properties.roleStatus', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MECRole, self).__init__(**kwargs)
self.kind = 'MEC' # type: str
self.connection_string = kwargs.get('connection_string', None)
self.role_status = kwargs.get('role_status', None)
class MetricConfiguration(msrest.serialization.Model):
"""Metric configuration.
All required parameters must be populated in order to send to Azure.
:param resource_id: Required. The Resource ID on which the metrics should be pushed.
:type resource_id: str
:param mdm_account: The MDM account to which the counters should be pushed.
:type mdm_account: str
:param metric_name_space: The MDM namespace to which the counters should be pushed. This is
required if MDMAccount is specified.
:type metric_name_space: str
:param counter_sets: Required. The metric counter sets that should be collected.
:type counter_sets: list[~azure.mgmt.databoxedge.v2020_09_01.models.MetricCounterSet]
"""
_validation = {
'resource_id': {'required': True},
'counter_sets': {'required': True},
}
_attribute_map = {
'resource_id': {'key': 'resourceId', 'type': 'str'},
'mdm_account': {'key': 'mdmAccount', 'type': 'str'},
'metric_name_space': {'key': 'metricNameSpace', 'type': 'str'},
'counter_sets': {'key': 'counterSets', 'type': '[MetricCounterSet]'},
}
def __init__(
self,
**kwargs
):
super(MetricConfiguration, self).__init__(**kwargs)
self.resource_id = kwargs['resource_id']
self.mdm_account = kwargs.get('mdm_account', None)
self.metric_name_space = kwargs.get('metric_name_space', None)
self.counter_sets = kwargs['counter_sets']
class MetricCounter(msrest.serialization.Model):
"""The metric counter.
All required parameters must be populated in order to send to Azure.
:param name: Required. The counter name.
:type name: str
:param instance: The instance from which the counter should be collected.
:type instance: str
:param dimension_filter: The dimension filter.
:type dimension_filter: list[~azure.mgmt.databoxedge.v2020_09_01.models.MetricDimension]
:param additional_dimensions: The additional dimensions to be added to the metric.
:type additional_dimensions: list[~azure.mgmt.databoxedge.v2020_09_01.models.MetricDimension]
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'instance': {'key': 'instance', 'type': 'str'},
'dimension_filter': {'key': 'dimensionFilter', 'type': '[MetricDimension]'},
'additional_dimensions': {'key': 'additionalDimensions', 'type': '[MetricDimension]'},
}
def __init__(
self,
**kwargs
):
super(MetricCounter, self).__init__(**kwargs)
self.name = kwargs['name']
self.instance = kwargs.get('instance', None)
self.dimension_filter = kwargs.get('dimension_filter', None)
self.additional_dimensions = kwargs.get('additional_dimensions', None)
class MetricCounterSet(msrest.serialization.Model):
"""The metric counter set.
All required parameters must be populated in order to send to Azure.
:param counters: Required. The counters that should be collected in this set.
:type counters: list[~azure.mgmt.databoxedge.v2020_09_01.models.MetricCounter]
"""
_validation = {
'counters': {'required': True},
}
_attribute_map = {
'counters': {'key': 'counters', 'type': '[MetricCounter]'},
}
def __init__(
self,
**kwargs
):
super(MetricCounterSet, self).__init__(**kwargs)
self.counters = kwargs['counters']
class MetricDimension(msrest.serialization.Model):
"""The metric dimension.
All required parameters must be populated in order to send to Azure.
:param source_type: Required. The dimension type.
:type source_type: str
:param source_name: Required. The dimension value.
:type source_name: str
"""
_validation = {
'source_type': {'required': True},
'source_name': {'required': True},
}
_attribute_map = {
'source_type': {'key': 'sourceType', 'type': 'str'},
'source_name': {'key': 'sourceName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MetricDimension, self).__init__(**kwargs)
self.source_type = kwargs['source_type']
self.source_name = kwargs['source_name']
class MetricDimensionV1(msrest.serialization.Model):
"""Metric Dimension v1.
:param name: Name of the metrics dimension.
:type name: str
:param display_name: Display name of the metrics dimension.
:type display_name: str
:param to_be_exported_for_shoebox: Whether the dimension should be exported to Shoebox.
:type to_be_exported_for_shoebox: bool
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(MetricDimensionV1, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.display_name = kwargs.get('display_name', None)
self.to_be_exported_for_shoebox = kwargs.get('to_be_exported_for_shoebox', None)
class MetricSpecificationV1(msrest.serialization.Model):
"""Metric specification version 1.
:param name: Name of the metric.
:type name: str
:param display_name: Display name of the metric.
:type display_name: str
:param display_description: Description of the metric to be displayed.
:type display_description: str
:param unit: Metric units. Possible values include: "NotSpecified", "Percent", "Count",
"Seconds", "Milliseconds", "Bytes", "BytesPerSecond", "CountPerSecond".
:type unit: str or ~azure.mgmt.databoxedge.v2020_09_01.models.MetricUnit
:param aggregation_type: Metric aggregation type. Possible values include: "NotSpecified",
"None", "Average", "Minimum", "Maximum", "Total", "Count".
:type aggregation_type: str or ~azure.mgmt.databoxedge.v2020_09_01.models.MetricAggregationType
:param dimensions: Metric dimensions, other than default dimension which is resource.
:type dimensions: list[~azure.mgmt.databoxedge.v2020_09_01.models.MetricDimensionV1]
:param fill_gap_with_zero: Set true to fill the gaps with zero.
:type fill_gap_with_zero: bool
:param category: Metric category. Possible values include: "Capacity", "Transaction".
:type category: str or ~azure.mgmt.databoxedge.v2020_09_01.models.MetricCategory
:param resource_id_dimension_name_override: Resource name override.
:type resource_id_dimension_name_override: str
:param supported_time_grain_types: Supported time granularities of the metric.
:type supported_time_grain_types: list[str or
~azure.mgmt.databoxedge.v2020_09_01.models.TimeGrain]
:param supported_aggregation_types: Supported aggregation types of the metric.
:type supported_aggregation_types: list[str or
~azure.mgmt.databoxedge.v2020_09_01.models.MetricAggregationType]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'display_description': {'key': 'displayDescription', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'aggregation_type': {'key': 'aggregationType', 'type': 'str'},
'dimensions': {'key': 'dimensions', 'type': '[MetricDimensionV1]'},
'fill_gap_with_zero': {'key': 'fillGapWithZero', 'type': 'bool'},
'category': {'key': 'category', 'type': 'str'},
'resource_id_dimension_name_override': {'key': 'resourceIdDimensionNameOverride', 'type': 'str'},
'supported_time_grain_types': {'key': 'supportedTimeGrainTypes', 'type': '[str]'},
'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(MetricSpecificationV1, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.display_name = kwargs.get('display_name', None)
self.display_description = kwargs.get('display_description', None)
self.unit = kwargs.get('unit', None)
self.aggregation_type = kwargs.get('aggregation_type', None)
self.dimensions = kwargs.get('dimensions', None)
self.fill_gap_with_zero = kwargs.get('fill_gap_with_zero', None)
self.category = kwargs.get('category', None)
self.resource_id_dimension_name_override = kwargs.get('resource_id_dimension_name_override', None)
self.supported_time_grain_types = kwargs.get('supported_time_grain_types', None)
self.supported_aggregation_types = kwargs.get('supported_aggregation_types', None)
class MonitoringMetricConfiguration(ARMBaseModel):
"""The metric setting details for the role.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:param metric_configurations: Required. The metrics configuration details.
:type metric_configurations:
list[~azure.mgmt.databoxedge.v2020_09_01.models.MetricConfiguration]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'metric_configurations': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'metric_configurations': {'key': 'properties.metricConfigurations', 'type': '[MetricConfiguration]'},
}
def __init__(
self,
**kwargs
):
super(MonitoringMetricConfiguration, self).__init__(**kwargs)
self.metric_configurations = kwargs['metric_configurations']
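# Illustrative metric configuration for a role (editorial example; the resource ID and counter
# name are hypothetical):
#
#     metric_config = MonitoringMetricConfiguration(
#         metric_configurations=[
#             MetricConfiguration(
#                 resource_id="<role ARM ID>",
#                 counter_sets=[
#                     MetricCounterSet(counters=[MetricCounter(name="CpuUsage")]),
#                 ],
#             ),
#         ],
#     )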
class MonitoringMetricConfigurationList(msrest.serialization.Model):
"""Collection of metric configurations.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of metric configurations.
:vartype value: list[~azure.mgmt.databoxedge.v2020_09_01.models.MonitoringMetricConfiguration]
:ivar next_link: Link to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[MonitoringMetricConfiguration]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MonitoringMetricConfigurationList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class MountPointMap(msrest.serialization.Model):
"""The share mount point.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param share_id: Required. ID of the share mounted to the role VM.
:type share_id: str
:ivar role_id: ID of the role to which share is mounted.
:vartype role_id: str
:ivar mount_point: Mount point for the share.
:vartype mount_point: str
:ivar mount_type: Mounting type. Possible values include: "Volume", "HostPath".
:vartype mount_type: str or ~azure.mgmt.databoxedge.v2020_09_01.models.MountType
:ivar role_type: Role type. Possible values include: "IOT", "ASA", "Functions", "Cognitive",
"MEC", "CloudEdgeManagement", "Kubernetes".
:vartype role_type: str or ~azure.mgmt.databoxedge.v2020_09_01.models.RoleTypes
"""
_validation = {
'share_id': {'required': True},
'role_id': {'readonly': True},
'mount_point': {'readonly': True},
'mount_type': {'readonly': True},
'role_type': {'readonly': True},
}
_attribute_map = {
'share_id': {'key': 'shareId', 'type': 'str'},
'role_id': {'key': 'roleId', 'type': 'str'},
'mount_point': {'key': 'mountPoint', 'type': 'str'},
'mount_type': {'key': 'mountType', 'type': 'str'},
'role_type': {'key': 'roleType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MountPointMap, self).__init__(**kwargs)
self.share_id = kwargs['share_id']
self.role_id = None
self.mount_point = None
self.mount_type = None
self.role_type = None
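# Illustrative usage (editorial example; the share ID is hypothetical). MountPointMap is the
# element type used by IoTRole.share_mappings and KubernetesRoleStorage.endpoints above:
#
#     mount = MountPointMap(share_id="<share ARM ID>")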
class NetworkAdapter(msrest.serialization.Model):
"""Represents the networkAdapter on a device.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar adapter_id: Instance ID of network adapter.
:vartype adapter_id: str
:ivar adapter_position: Hardware position of network adapter.
:vartype adapter_position: ~azure.mgmt.databoxedge.v2020_09_01.models.NetworkAdapterPosition
:ivar index: Logical index of the adapter.
:vartype index: int
:ivar node_id: Node ID of the network adapter.
:vartype node_id: str
:ivar network_adapter_name: Network adapter name.
:vartype network_adapter_name: str
:ivar label: Hardware label for the adapter.
:vartype label: str
:ivar mac_address: MAC address.
:vartype mac_address: str
:ivar link_speed: Link speed.
:vartype link_speed: long
:ivar status: Value indicating whether this adapter is valid. Possible values include:
"Inactive", "Active".
:vartype status: str or ~azure.mgmt.databoxedge.v2020_09_01.models.NetworkAdapterStatus
:param rdma_status: Value indicating whether this adapter is RDMA capable. Possible values
include: "Incapable", "Capable".
:type rdma_status: str or ~azure.mgmt.databoxedge.v2020_09_01.models.NetworkAdapterRDMAStatus
:param dhcp_status: Value indicating whether this adapter has DHCP enabled. Possible values
include: "Disabled", "Enabled".
:type dhcp_status: str or ~azure.mgmt.databoxedge.v2020_09_01.models.NetworkAdapterDHCPStatus
:ivar ipv4_configuration: The IPv4 configuration of the network adapter.
:vartype ipv4_configuration: ~azure.mgmt.databoxedge.v2020_09_01.models.Ipv4Config
:ivar ipv6_configuration: The IPv6 configuration of the network adapter.
:vartype ipv6_configuration: ~azure.mgmt.databoxedge.v2020_09_01.models.Ipv6Config
:ivar ipv6_link_local_address: The IPv6 local address.
:vartype ipv6_link_local_address: str
:ivar dns_servers: The list of DNS Servers of the device.
:vartype dns_servers: list[str]
"""
_validation = {
'adapter_id': {'readonly': True},
'adapter_position': {'readonly': True},
'index': {'readonly': True},
'node_id': {'readonly': True},
'network_adapter_name': {'readonly': True},
'label': {'readonly': True},
'mac_address': {'readonly': True},
'link_speed': {'readonly': True},
'status': {'readonly': True},
'ipv4_configuration': {'readonly': True},
'ipv6_configuration': {'readonly': True},
'ipv6_link_local_address': {'readonly': True},
'dns_servers': {'readonly': True},
}
_attribute_map = {
'adapter_id': {'key': 'adapterId', 'type': 'str'},
'adapter_position': {'key': 'adapterPosition', 'type': 'NetworkAdapterPosition'},
'index': {'key': 'index', 'type': 'int'},
'node_id': {'key': 'nodeId', 'type': 'str'},
'network_adapter_name': {'key': 'networkAdapterName', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'mac_address': {'key': 'macAddress', 'type': 'str'},
'link_speed': {'key': 'linkSpeed', 'type': 'long'},
'status': {'key': 'status', 'type': 'str'},
'rdma_status': {'key': 'rdmaStatus', 'type': 'str'},
'dhcp_status': {'key': 'dhcpStatus', 'type': 'str'},
'ipv4_configuration': {'key': 'ipv4Configuration', 'type': 'Ipv4Config'},
'ipv6_configuration': {'key': 'ipv6Configuration', 'type': 'Ipv6Config'},
'ipv6_link_local_address': {'key': 'ipv6LinkLocalAddress', 'type': 'str'},
'dns_servers': {'key': 'dnsServers', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(NetworkAdapter, self).__init__(**kwargs)
self.adapter_id = None
self.adapter_position = None
self.index = None
self.node_id = None
self.network_adapter_name = None
self.label = None
self.mac_address = None
self.link_speed = None
self.status = None
self.rdma_status = kwargs.get('rdma_status', None)
self.dhcp_status = kwargs.get('dhcp_status', None)
self.ipv4_configuration = None
self.ipv6_configuration = None
self.ipv6_link_local_address = None
self.dns_servers = None
class NetworkAdapterPosition(msrest.serialization.Model):
"""The network adapter position.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar network_group: The network group. Possible values include: "None", "NonRDMA", "RDMA".
:vartype network_group: str or ~azure.mgmt.databoxedge.v2020_09_01.models.NetworkGroup
:ivar port: The port.
:vartype port: int
"""
_validation = {
'network_group': {'readonly': True},
'port': {'readonly': True},
}
_attribute_map = {
'network_group': {'key': 'networkGroup', 'type': 'str'},
'port': {'key': 'port', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(NetworkAdapterPosition, self).__init__(**kwargs)
self.network_group = None
self.port = None
class NetworkSettings(ARMBaseModel):
"""The network settings of a device.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:ivar network_adapters: The network adapter list on the device.
:vartype network_adapters: list[~azure.mgmt.databoxedge.v2020_09_01.models.NetworkAdapter]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'network_adapters': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'network_adapters': {'key': 'properties.networkAdapters', 'type': '[NetworkAdapter]'},
}
def __init__(
self,
**kwargs
):
super(NetworkSettings, self).__init__(**kwargs)
self.network_adapters = None
class Node(ARMBaseModel):
"""Represents a single node in a Data box Edge/Gateway device
Gateway devices, standalone Edge devices and a single node cluster Edge device will all have 1 node
Multi-node Edge devices will have more than 1 nodes.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:ivar node_status: The current status of the individual node. Possible values include:
"Unknown", "Up", "Down", "Rebooting", "ShuttingDown".
:vartype node_status: str or ~azure.mgmt.databoxedge.v2020_09_01.models.NodeStatus
:ivar node_chassis_serial_number: Serial number of the Chassis.
:vartype node_chassis_serial_number: str
:ivar node_serial_number: Serial number of the individual node.
:vartype node_serial_number: str
:ivar node_display_name: Display Name of the individual node.
:vartype node_display_name: str
:ivar node_friendly_software_version: Friendly software version name that is currently
installed on the node.
:vartype node_friendly_software_version: str
:ivar node_hcs_version: HCS version that is currently installed on the node.
:vartype node_hcs_version: str
:ivar node_instance_id: Guid instance id of the node.
:vartype node_instance_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'node_status': {'readonly': True},
'node_chassis_serial_number': {'readonly': True},
'node_serial_number': {'readonly': True},
'node_display_name': {'readonly': True},
'node_friendly_software_version': {'readonly': True},
'node_hcs_version': {'readonly': True},
'node_instance_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'node_status': {'key': 'properties.nodeStatus', 'type': 'str'},
'node_chassis_serial_number': {'key': 'properties.nodeChassisSerialNumber', 'type': 'str'},
'node_serial_number': {'key': 'properties.nodeSerialNumber', 'type': 'str'},
'node_display_name': {'key': 'properties.nodeDisplayName', 'type': 'str'},
'node_friendly_software_version': {'key': 'properties.nodeFriendlySoftwareVersion', 'type': 'str'},
'node_hcs_version': {'key': 'properties.nodeHcsVersion', 'type': 'str'},
'node_instance_id': {'key': 'properties.nodeInstanceId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Node, self).__init__(**kwargs)
self.node_status = None
self.node_chassis_serial_number = None
self.node_serial_number = None
self.node_display_name = None
self.node_friendly_software_version = None
self.node_hcs_version = None
self.node_instance_id = None
class NodeInfo(msrest.serialization.Model):
"""Kubernetes node info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Node name.
:vartype name: str
:ivar type: Node type - Master/Worker. Possible values include: "Invalid", "Master", "Worker".
:vartype type: str or ~azure.mgmt.databoxedge.v2020_09_01.models.KubernetesNodeType
:param ip_configuration: IP Configuration of the Kubernetes node.
:type ip_configuration:
list[~azure.mgmt.databoxedge.v2020_09_01.models.KubernetesIPConfiguration]
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'ip_configuration': {'key': 'ipConfiguration', 'type': '[KubernetesIPConfiguration]'},
}
def __init__(
self,
**kwargs
):
super(NodeInfo, self).__init__(**kwargs)
self.name = None
self.type = None
self.ip_configuration = kwargs.get('ip_configuration', None)
class NodeList(msrest.serialization.Model):
"""Collection of Nodes.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of Nodes.
:vartype value: list[~azure.mgmt.databoxedge.v2020_09_01.models.Node]
:param next_link: Link to the next set of results.
:type next_link: str
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Node]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(NodeList, self).__init__(**kwargs)
self.value = None
self.next_link = kwargs.get('next_link', None)
class Operation(msrest.serialization.Model):
"""Operations.
:param name: Name of the operation.
:type name: str
:param display: Properties displayed for the operation.
:type display: ~azure.mgmt.databoxedge.v2020_09_01.models.OperationDisplay
:param origin: Origin of the operation.
:type origin: str
:param is_data_action: Indicates whether the operation is a data action.
:type is_data_action: bool
:param service_specification: Service specification.
:type service_specification: ~azure.mgmt.databoxedge.v2020_09_01.models.ServiceSpecification
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'origin': {'key': 'origin', 'type': 'str'},
'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
'service_specification': {'key': 'properties.serviceSpecification', 'type': 'ServiceSpecification'},
}
def __init__(
self,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.display = kwargs.get('display', None)
self.origin = kwargs.get('origin', None)
self.is_data_action = kwargs.get('is_data_action', None)
self.service_specification = kwargs.get('service_specification', None)
class OperationDisplay(msrest.serialization.Model):
"""Operation display properties.
:param provider: Provider name.
:type provider: str
:param resource: The type of resource in which the operation is performed.
:type resource: str
:param operation: Operation to be performed on the resource.
:type operation: str
:param description: Description of the operation to be performed.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = kwargs.get('provider', None)
self.resource = kwargs.get('resource', None)
self.operation = kwargs.get('operation', None)
self.description = kwargs.get('description', None)
class OperationsList(msrest.serialization.Model):
"""The list of operations used for the discovery of available provider operations.
All required parameters must be populated in order to send to Azure.
:param value: Required. The value.
:type value: list[~azure.mgmt.databoxedge.v2020_09_01.models.Operation]
:param next_link: Link to the next set of results.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationsList, self).__init__(**kwargs)
self.value = kwargs['value']
self.next_link = kwargs.get('next_link', None)
class Order(ARMBaseModel):
"""The order details.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:param contact_information: The contact details.
:type contact_information: ~azure.mgmt.databoxedge.v2020_09_01.models.ContactDetails
:param shipping_address: The shipping address.
:type shipping_address: ~azure.mgmt.databoxedge.v2020_09_01.models.Address
:ivar current_status: Current status of the order.
:vartype current_status: ~azure.mgmt.databoxedge.v2020_09_01.models.OrderStatus
:ivar order_history: List of status changes in the order.
:vartype order_history: list[~azure.mgmt.databoxedge.v2020_09_01.models.OrderStatus]
:ivar serial_number: Serial number of the device.
:vartype serial_number: str
    :ivar delivery_tracking_info: Tracking information for the package delivered to the customer,
     whether it contains an original or a replacement device.
    :vartype delivery_tracking_info: list[~azure.mgmt.databoxedge.v2020_09_01.models.TrackingInfo]
    :ivar return_tracking_info: Tracking information for the package returned from the customer,
     whether it contains an original or a replacement device.
    :vartype return_tracking_info: list[~azure.mgmt.databoxedge.v2020_09_01.models.TrackingInfo]
:param shipment_type: ShipmentType of the order. Possible values include: "NotApplicable",
"ShippedToCustomer", "SelfPickup".
:type shipment_type: str or ~azure.mgmt.databoxedge.v2020_09_01.models.ShipmentType
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'current_status': {'readonly': True},
'order_history': {'readonly': True},
'serial_number': {'readonly': True},
'delivery_tracking_info': {'readonly': True},
'return_tracking_info': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'contact_information': {'key': 'properties.contactInformation', 'type': 'ContactDetails'},
'shipping_address': {'key': 'properties.shippingAddress', 'type': 'Address'},
'current_status': {'key': 'properties.currentStatus', 'type': 'OrderStatus'},
'order_history': {'key': 'properties.orderHistory', 'type': '[OrderStatus]'},
'serial_number': {'key': 'properties.serialNumber', 'type': 'str'},
'delivery_tracking_info': {'key': 'properties.deliveryTrackingInfo', 'type': '[TrackingInfo]'},
'return_tracking_info': {'key': 'properties.returnTrackingInfo', 'type': '[TrackingInfo]'},
'shipment_type': {'key': 'properties.shipmentType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Order, self).__init__(**kwargs)
self.contact_information = kwargs.get('contact_information', None)
self.shipping_address = kwargs.get('shipping_address', None)
self.current_status = None
self.order_history = None
self.serial_number = None
self.delivery_tracking_info = None
self.return_tracking_info = None
self.shipment_type = kwargs.get('shipment_type', None)
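
# Illustrative sketch, not part of the generated client: only the contact
# details, shipping address and shipment type are settable on an Order; the
# current status, order history, serial number and tracking fields are
# populated by the service. ``contact`` and ``address`` are assumed to be
# ContactDetails and Address instances built from the models defined earlier
# in this module.
def _example_order(contact, address):
    return Order(
        contact_information=contact,
        shipping_address=address,
        shipment_type="ShippedToCustomer",  # one of the documented ShipmentType values
    )
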
class OrderList(msrest.serialization.Model):
"""List of order entities.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of orders.
:vartype value: list[~azure.mgmt.databoxedge.v2020_09_01.models.Order]
:ivar next_link: Link to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Order]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OrderList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class OrderStatus(msrest.serialization.Model):
"""Represents a single status change.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param status: Required. Status of the order as per the allowed status types. Possible values
include: "Untracked", "AwaitingFulfilment", "AwaitingPreparation", "AwaitingShipment",
"Shipped", "Arriving", "Delivered", "ReplacementRequested", "LostDevice", "Declined",
"ReturnInitiated", "AwaitingReturnShipment", "ShippedBack", "CollectedAtMicrosoft",
"AwaitingPickup", "PickupCompleted", "AwaitingDrop".
:type status: str or ~azure.mgmt.databoxedge.v2020_09_01.models.OrderState
:ivar update_date_time: Time of status update.
:vartype update_date_time: ~datetime.datetime
:param comments: Comments related to this status change.
:type comments: str
:ivar tracking_information: Tracking information related to the state in the ordering flow.
:vartype tracking_information: ~azure.mgmt.databoxedge.v2020_09_01.models.TrackingInfo
:ivar additional_order_details: Dictionary to hold generic information which is not stored
by the already existing properties.
:vartype additional_order_details: dict[str, str]
"""
_validation = {
'status': {'required': True},
'update_date_time': {'readonly': True},
'tracking_information': {'readonly': True},
'additional_order_details': {'readonly': True},
}
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'update_date_time': {'key': 'updateDateTime', 'type': 'iso-8601'},
'comments': {'key': 'comments', 'type': 'str'},
'tracking_information': {'key': 'trackingInformation', 'type': 'TrackingInfo'},
'additional_order_details': {'key': 'additionalOrderDetails', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(OrderStatus, self).__init__(**kwargs)
self.status = kwargs['status']
self.update_date_time = None
self.comments = kwargs.get('comments', None)
self.tracking_information = None
self.additional_order_details = None
class PeriodicTimerEventTrigger(Trigger):
"""Trigger details.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:ivar system_data: Trigger in DataBoxEdge Resource.
:vartype system_data: ~azure.mgmt.databoxedge.v2020_09_01.models.SystemData
    :param kind: Required. Trigger kind. Constant filled by server. Possible values include:
"FileEvent", "PeriodicTimerEvent".
:type kind: str or ~azure.mgmt.databoxedge.v2020_09_01.models.TriggerEventType
:param source_info: Required. Periodic timer details.
:type source_info: ~azure.mgmt.databoxedge.v2020_09_01.models.PeriodicTimerSourceInfo
:param sink_info: Required. Role Sink information.
:type sink_info: ~azure.mgmt.databoxedge.v2020_09_01.models.RoleSinkInfo
:param custom_context_tag: A custom context tag typically used to correlate the trigger against
its usage. For example, if a periodic timer trigger is intended for certain specific IoT
modules in the device, the tag can be the name or the image URL of the module.
:type custom_context_tag: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'kind': {'required': True},
'source_info': {'required': True},
'sink_info': {'required': True},
'custom_context_tag': {'max_length': 192, 'min_length': 0},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'kind': {'key': 'kind', 'type': 'str'},
'source_info': {'key': 'properties.sourceInfo', 'type': 'PeriodicTimerSourceInfo'},
'sink_info': {'key': 'properties.sinkInfo', 'type': 'RoleSinkInfo'},
'custom_context_tag': {'key': 'properties.customContextTag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PeriodicTimerEventTrigger, self).__init__(**kwargs)
self.kind = 'PeriodicTimerEvent' # type: str
self.source_info = kwargs['source_info']
self.sink_info = kwargs['sink_info']
self.custom_context_tag = kwargs.get('custom_context_tag', None)
class PeriodicTimerSourceInfo(msrest.serialization.Model):
"""Periodic timer event source.
All required parameters must be populated in order to send to Azure.
    :param start_time: Required. The time of day that results in a valid trigger. The schedule is
     computed with reference to the time specified, up to seconds. If a timezone is not specified,
     the time is considered to be in the device timezone. The value is always returned as UTC time.
    :type start_time: ~datetime.datetime
    :param schedule: Required. Periodic frequency at which the timer event needs to be raised.
     Supports daily, hourly, minute, and second frequencies.
:type schedule: str
:param topic: Topic where periodic events are published to IoT device.
:type topic: str
"""
_validation = {
'start_time': {'required': True},
'schedule': {'required': True},
}
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'schedule': {'key': 'schedule', 'type': 'str'},
'topic': {'key': 'topic', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PeriodicTimerSourceInfo, self).__init__(**kwargs)
self.start_time = kwargs['start_time']
self.schedule = kwargs['schedule']
self.topic = kwargs.get('topic', None)
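
# Illustrative sketch, not part of the generated client: builds a
# PeriodicTimerSourceInfo with the two required fields. The docstring above
# only states that daily/hourly/minute/second frequencies are supported, so
# the "PT1H" schedule string and the example topic are assumptions for
# illustration only.
def _example_periodic_timer_source_info():
    import datetime

    return PeriodicTimerSourceInfo(
        # Interpreted in the device timezone when no timezone is given.
        start_time=datetime.datetime(2021, 1, 1, 0, 0, 0),
        schedule="PT1H",                 # assumed hourly schedule format
        topic="/iothub/example-topic",   # optional, hypothetical topic
    )
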
class RefreshDetails(msrest.serialization.Model):
"""Fields for tracking refresh job on the share or container.
:param in_progress_refresh_job_id: If a refresh job is currently in progress on this share or
container, this field indicates the ARM resource ID of that job. The field is empty if no job
is in progress.
:type in_progress_refresh_job_id: str
    :param last_completed_refresh_job_time_in_utc: Indicates the completion time of the last
     refresh job on this particular share or container, if any. This could be a failed job or a
     successful job.
:type last_completed_refresh_job_time_in_utc: ~datetime.datetime
:param error_manifest_file: Indicates the relative path of the error xml for the last refresh
job on this particular share or container, if any. This could be a failed job or a successful
job.
:type error_manifest_file: str
    :param last_job: Indicates the ID of the last refresh job on this particular share or
     container, if any. This could be a failed job or a successful job.
:type last_job: str
"""
_attribute_map = {
'in_progress_refresh_job_id': {'key': 'inProgressRefreshJobId', 'type': 'str'},
'last_completed_refresh_job_time_in_utc': {'key': 'lastCompletedRefreshJobTimeInUTC', 'type': 'iso-8601'},
'error_manifest_file': {'key': 'errorManifestFile', 'type': 'str'},
'last_job': {'key': 'lastJob', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RefreshDetails, self).__init__(**kwargs)
self.in_progress_refresh_job_id = kwargs.get('in_progress_refresh_job_id', None)
self.last_completed_refresh_job_time_in_utc = kwargs.get('last_completed_refresh_job_time_in_utc', None)
self.error_manifest_file = kwargs.get('error_manifest_file', None)
self.last_job = kwargs.get('last_job', None)
class ResourceIdentity(msrest.serialization.Model):
"""Msi identity details of the resource.
Variables are only populated by the server, and will be ignored when sending a request.
:param type: Identity type. Possible values include: "None", "SystemAssigned", "UserAssigned".
:type type: str or ~azure.mgmt.databoxedge.v2020_09_01.models.MsiIdentityType
:ivar principal_id: Service Principal Id backing the Msi.
:vartype principal_id: str
:ivar tenant_id: Home Tenant Id.
:vartype tenant_id: str
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ResourceIdentity, self).__init__(**kwargs)
self.type = kwargs.get('type', None)
self.principal_id = None
self.tenant_id = None
class ResourceMoveDetails(msrest.serialization.Model):
"""Fields for tracking resource move.
:param operation_in_progress: Denotes whether move operation is in progress. Possible values
include: "None", "ResourceMoveInProgress", "ResourceMoveFailed".
:type operation_in_progress: str or
~azure.mgmt.databoxedge.v2020_09_01.models.ResourceMoveStatus
:param operation_in_progress_lock_timeout_in_utc: Denotes the timeout of the operation to
finish.
:type operation_in_progress_lock_timeout_in_utc: ~datetime.datetime
"""
_attribute_map = {
'operation_in_progress': {'key': 'operationInProgress', 'type': 'str'},
'operation_in_progress_lock_timeout_in_utc': {'key': 'operationInProgressLockTimeoutInUTC', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(ResourceMoveDetails, self).__init__(**kwargs)
self.operation_in_progress = kwargs.get('operation_in_progress', None)
self.operation_in_progress_lock_timeout_in_utc = kwargs.get('operation_in_progress_lock_timeout_in_utc', None)
class ResourceTypeSku(msrest.serialization.Model):
"""Resource type Sku object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar resource_type: The resource type.
:vartype resource_type: str
:ivar skus: The skus.
:vartype skus: list[~azure.mgmt.databoxedge.v2020_09_01.models.SkuInformation]
"""
_validation = {
'resource_type': {'readonly': True},
'skus': {'readonly': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'skus': {'key': 'skus', 'type': '[SkuInformation]'},
}
def __init__(
self,
**kwargs
):
super(ResourceTypeSku, self).__init__(**kwargs)
self.resource_type = None
self.skus = None
class RoleList(msrest.serialization.Model):
"""Collection of all the roles on the Data Box Edge device.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The Value.
:vartype value: list[~azure.mgmt.databoxedge.v2020_09_01.models.Role]
:ivar next_link: Link to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Role]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RoleList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class RoleSinkInfo(msrest.serialization.Model):
"""Compute role against which events will be raised.
All required parameters must be populated in order to send to Azure.
:param role_id: Required. Compute role ID.
:type role_id: str
"""
_validation = {
'role_id': {'required': True},
}
_attribute_map = {
'role_id': {'key': 'roleId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RoleSinkInfo, self).__init__(**kwargs)
self.role_id = kwargs['role_id']
class SecuritySettings(ARMBaseModel):
"""The security settings of a device.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
    :param device_admin_password: Required. Device administrator password as an encrypted string
     (encrypted using RSA PKCS #1), used to sign in to the local web UI of the device. The actual
     password should have at least 8 characters that are a combination of uppercase, lowercase,
     numeric, and special characters.
:type device_admin_password:
~azure.mgmt.databoxedge.v2020_09_01.models.AsymmetricEncryptedSecret
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'device_admin_password': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'device_admin_password': {'key': 'properties.deviceAdminPassword', 'type': 'AsymmetricEncryptedSecret'},
}
def __init__(
self,
**kwargs
):
super(SecuritySettings, self).__init__(**kwargs)
self.device_admin_password = kwargs['device_admin_password']
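
# Illustrative sketch, not part of the generated client: wraps an already
# RSA PKCS #1 encrypted password in an AsymmetricEncryptedSecret (defined
# earlier in this module) and builds the SecuritySettings payload. The secret
# keyword names and the "RSAES_PKCS1_v_1_5" algorithm value are assumptions
# based on that model; the cipher text and thumbprint are placeholders.
def _example_security_settings(encrypted_password_value, cert_thumbprint):
    secret = AsymmetricEncryptedSecret(
        value=encrypted_password_value,
        encryption_cert_thumbprint=cert_thumbprint,
        encryption_algorithm="RSAES_PKCS1_v_1_5",
    )
    return SecuritySettings(device_admin_password=secret)
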
class ServiceSpecification(msrest.serialization.Model):
"""Service specification.
:param metric_specifications: Metric specification as defined by shoebox.
:type metric_specifications:
list[~azure.mgmt.databoxedge.v2020_09_01.models.MetricSpecificationV1]
"""
_attribute_map = {
'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecificationV1]'},
}
def __init__(
self,
**kwargs
):
super(ServiceSpecification, self).__init__(**kwargs)
self.metric_specifications = kwargs.get('metric_specifications', None)
class Share(ARMBaseModel):
"""Represents a share on the Data Box Edge/Gateway device.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:ivar system_data: Share on ASE device.
:vartype system_data: ~azure.mgmt.databoxedge.v2020_09_01.models.SystemData
:param description: Description for the share.
:type description: str
:param share_status: Required. Current status of the share. Possible values include: "Offline",
"Unknown", "OK", "Updating", "NeedsAttention".
:type share_status: str or ~azure.mgmt.databoxedge.v2020_09_01.models.ShareStatus
:param monitoring_status: Required. Current monitoring status of the share. Possible values
include: "Enabled", "Disabled".
:type monitoring_status: str or ~azure.mgmt.databoxedge.v2020_09_01.models.MonitoringStatus
:param azure_container_info: Azure container mapping for the share.
:type azure_container_info: ~azure.mgmt.databoxedge.v2020_09_01.models.AzureContainerInfo
:param access_protocol: Required. Access protocol to be used by the share. Possible values
include: "SMB", "NFS".
:type access_protocol: str or ~azure.mgmt.databoxedge.v2020_09_01.models.ShareAccessProtocol
:param user_access_rights: Mapping of users and corresponding access rights on the share
(required for SMB protocol).
:type user_access_rights: list[~azure.mgmt.databoxedge.v2020_09_01.models.UserAccessRight]
:param client_access_rights: List of IP addresses and corresponding access rights on the
     share (required for NFS protocol).
:type client_access_rights: list[~azure.mgmt.databoxedge.v2020_09_01.models.ClientAccessRight]
:param refresh_details: Details of the refresh job on this share.
:type refresh_details: ~azure.mgmt.databoxedge.v2020_09_01.models.RefreshDetails
:ivar share_mappings: Share mount point to the role.
:vartype share_mappings: list[~azure.mgmt.databoxedge.v2020_09_01.models.MountPointMap]
:param data_policy: Data policy of the share. Possible values include: "Cloud", "Local".
:type data_policy: str or ~azure.mgmt.databoxedge.v2020_09_01.models.DataPolicy
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'share_status': {'required': True},
'monitoring_status': {'required': True},
'access_protocol': {'required': True},
'share_mappings': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'description': {'key': 'properties.description', 'type': 'str'},
'share_status': {'key': 'properties.shareStatus', 'type': 'str'},
'monitoring_status': {'key': 'properties.monitoringStatus', 'type': 'str'},
'azure_container_info': {'key': 'properties.azureContainerInfo', 'type': 'AzureContainerInfo'},
'access_protocol': {'key': 'properties.accessProtocol', 'type': 'str'},
'user_access_rights': {'key': 'properties.userAccessRights', 'type': '[UserAccessRight]'},
'client_access_rights': {'key': 'properties.clientAccessRights', 'type': '[ClientAccessRight]'},
'refresh_details': {'key': 'properties.refreshDetails', 'type': 'RefreshDetails'},
'share_mappings': {'key': 'properties.shareMappings', 'type': '[MountPointMap]'},
'data_policy': {'key': 'properties.dataPolicy', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Share, self).__init__(**kwargs)
self.system_data = None
self.description = kwargs.get('description', None)
self.share_status = kwargs['share_status']
self.monitoring_status = kwargs['monitoring_status']
self.azure_container_info = kwargs.get('azure_container_info', None)
self.access_protocol = kwargs['access_protocol']
self.user_access_rights = kwargs.get('user_access_rights', None)
self.client_access_rights = kwargs.get('client_access_rights', None)
self.refresh_details = kwargs.get('refresh_details', None)
self.share_mappings = None
self.data_policy = kwargs.get('data_policy', None)
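
# Illustrative sketch, not part of the generated client: a minimal cloud-backed
# SMB share using only the required properties plus the data policy; the
# status, protocol and policy strings are the documented enum values.
def _example_smb_share():
    return Share(
        share_status="OK",
        monitoring_status="Enabled",
        access_protocol="SMB",
        data_policy="Cloud",
    )
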
class ShareAccessRight(msrest.serialization.Model):
"""Specifies the mapping between this particular user and the type of access he has on shares on this device.
All required parameters must be populated in order to send to Azure.
:param share_id: Required. The share ID.
:type share_id: str
:param access_type: Required. Type of access to be allowed on the share for this user. Possible
values include: "Change", "Read", "Custom".
:type access_type: str or ~azure.mgmt.databoxedge.v2020_09_01.models.ShareAccessType
"""
_validation = {
'share_id': {'required': True},
'access_type': {'required': True},
}
_attribute_map = {
'share_id': {'key': 'shareId', 'type': 'str'},
'access_type': {'key': 'accessType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ShareAccessRight, self).__init__(**kwargs)
self.share_id = kwargs['share_id']
self.access_type = kwargs['access_type']
class ShareList(msrest.serialization.Model):
"""Collection of all the shares on the Data Box Edge/Gateway device.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of shares.
:vartype value: list[~azure.mgmt.databoxedge.v2020_09_01.models.Share]
:ivar next_link: Link to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Share]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ShareList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class Sku(msrest.serialization.Model):
"""The SKU type.
:param name: SKU name. Possible values include: "Gateway", "Edge", "TEA_1Node",
"TEA_1Node_UPS", "TEA_1Node_Heater", "TEA_1Node_UPS_Heater", "TEA_4Node_Heater",
"TEA_4Node_UPS_Heater", "TMA", "TDC", "TCA_Small", "GPU", "TCA_Large", "EdgeP_Base",
"EdgeP_High", "EdgePR_Base", "EdgePR_Base_UPS", "EdgeMR_Mini", "RCA_Small", "RCA_Large", "RDC".
:type name: str or ~azure.mgmt.databoxedge.v2020_09_01.models.SkuName
:param tier: The SKU tier. This is based on the SKU name. Possible values include: "Standard".
:type tier: str or ~azure.mgmt.databoxedge.v2020_09_01.models.SkuTier
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Sku, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.tier = kwargs.get('tier', None)
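
# Illustrative sketch, not part of the generated client: the tier is derived
# from the SKU name, and "Standard" is the only documented tier, so a SKU is
# usually specified by name alone.
def _example_sku():
    return Sku(name="EdgeP_Base", tier="Standard")
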
class SkuCost(msrest.serialization.Model):
"""The metadata for retrieving price info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar meter_id: Used for querying price from commerce.
:vartype meter_id: str
:ivar quantity: The cost quantity.
:vartype quantity: long
:ivar extended_unit: The extended unit.
:vartype extended_unit: str
"""
_validation = {
'meter_id': {'readonly': True},
'quantity': {'readonly': True},
'extended_unit': {'readonly': True},
}
_attribute_map = {
'meter_id': {'key': 'meterId', 'type': 'str'},
'quantity': {'key': 'quantity', 'type': 'long'},
'extended_unit': {'key': 'extendedUnit', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SkuCost, self).__init__(**kwargs)
self.meter_id = None
self.quantity = None
self.extended_unit = None
class SkuInformation(msrest.serialization.Model):
"""Sku information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The sku name.
:vartype name: str
:ivar tier: The sku tier.
:vartype tier: str
:ivar kind: The sku kind.
:vartype kind: str
:ivar family: The Sku family.
:vartype family: str
:ivar costs: The pricing info of the Sku.
:vartype costs: list[~azure.mgmt.databoxedge.v2020_09_01.models.SkuCost]
:ivar locations: The locations where Sku is available.
:vartype locations: list[str]
:ivar location_info: The locations where Sku is available with zones and sites info.
:vartype location_info: list[~azure.mgmt.databoxedge.v2020_09_01.models.SkuLocationInfo]
:ivar required_quota_ids: The required quotaIds for the sku to be available.
:vartype required_quota_ids: list[str]
:ivar required_features: The required features for the sku to be available.
:vartype required_features: list[str]
"""
_validation = {
'name': {'readonly': True},
'tier': {'readonly': True},
'kind': {'readonly': True},
'family': {'readonly': True},
'costs': {'readonly': True},
'locations': {'readonly': True},
'location_info': {'readonly': True},
'required_quota_ids': {'readonly': True},
'required_features': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'family': {'key': 'family', 'type': 'str'},
'costs': {'key': 'costs', 'type': '[SkuCost]'},
'locations': {'key': 'locations', 'type': '[str]'},
'location_info': {'key': 'locationInfo', 'type': '[SkuLocationInfo]'},
'required_quota_ids': {'key': 'requiredQuotaIds', 'type': '[str]'},
'required_features': {'key': 'requiredFeatures', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(SkuInformation, self).__init__(**kwargs)
self.name = None
self.tier = None
self.kind = None
self.family = None
self.costs = None
self.locations = None
self.location_info = None
self.required_quota_ids = None
self.required_features = None
class SkuInformationList(msrest.serialization.Model):
"""List of SKU Information objects.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of ResourceTypeSku objects.
:vartype value: list[~azure.mgmt.databoxedge.v2020_09_01.models.ResourceTypeSku]
:ivar next_link: Links to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ResourceTypeSku]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SkuInformationList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class SkuLocationInfo(msrest.serialization.Model):
"""The location info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar location: The location.
:vartype location: str
:ivar zones: The zones.
:vartype zones: list[str]
:ivar sites: The sites.
:vartype sites: list[str]
"""
_validation = {
'location': {'readonly': True},
'zones': {'readonly': True},
'sites': {'readonly': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'zones': {'key': 'zones', 'type': '[str]'},
'sites': {'key': 'sites', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(SkuLocationInfo, self).__init__(**kwargs)
self.location = None
self.zones = None
self.sites = None
class StorageAccount(ARMBaseModel):
"""Represents a Storage Account on the Data Box Edge/Gateway device.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:ivar system_data: StorageAccount object on ASE device.
:vartype system_data: ~azure.mgmt.databoxedge.v2020_09_01.models.SystemData
:param description: Description for the storage Account.
:type description: str
:param storage_account_status: Current status of the storage account. Possible values include:
"OK", "Offline", "Unknown", "Updating", "NeedsAttention".
:type storage_account_status: str or
~azure.mgmt.databoxedge.v2020_09_01.models.StorageAccountStatus
:param data_policy: Required. Data policy of the storage Account. Possible values include:
"Cloud", "Local".
:type data_policy: str or ~azure.mgmt.databoxedge.v2020_09_01.models.DataPolicy
:param storage_account_credential_id: Storage Account Credential Id.
:type storage_account_credential_id: str
:ivar blob_endpoint: BlobEndpoint of Storage Account.
:vartype blob_endpoint: str
:ivar container_count: The Container Count. Present only for Storage Accounts with DataPolicy
set to Cloud.
:vartype container_count: int
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'data_policy': {'required': True},
'blob_endpoint': {'readonly': True},
'container_count': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'description': {'key': 'properties.description', 'type': 'str'},
'storage_account_status': {'key': 'properties.storageAccountStatus', 'type': 'str'},
'data_policy': {'key': 'properties.dataPolicy', 'type': 'str'},
'storage_account_credential_id': {'key': 'properties.storageAccountCredentialId', 'type': 'str'},
'blob_endpoint': {'key': 'properties.blobEndpoint', 'type': 'str'},
'container_count': {'key': 'properties.containerCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(StorageAccount, self).__init__(**kwargs)
self.system_data = None
self.description = kwargs.get('description', None)
self.storage_account_status = kwargs.get('storage_account_status', None)
self.data_policy = kwargs['data_policy']
self.storage_account_credential_id = kwargs.get('storage_account_credential_id', None)
self.blob_endpoint = None
self.container_count = None
class StorageAccountCredential(ARMBaseModel):
"""The storage account credential.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:ivar system_data: StorageAccountCredential object.
:vartype system_data: ~azure.mgmt.databoxedge.v2020_09_01.models.SystemData
:param alias: Required. Alias for the storage account.
:type alias: str
:param user_name: Username for the storage account.
:type user_name: str
:param account_key: Encrypted storage key.
:type account_key: ~azure.mgmt.databoxedge.v2020_09_01.models.AsymmetricEncryptedSecret
:param connection_string: Connection string for the storage account. Use this string if
username and account key are not specified.
:type connection_string: str
:param ssl_status: Required. Signifies whether SSL needs to be enabled or not. Possible values
include: "Enabled", "Disabled".
:type ssl_status: str or ~azure.mgmt.databoxedge.v2020_09_01.models.SSLStatus
    :param blob_domain_name: Blob endpoint for private clouds.
:type blob_domain_name: str
:param account_type: Required. Type of storage accessed on the storage account. Possible values
include: "GeneralPurposeStorage", "BlobStorage".
:type account_type: str or ~azure.mgmt.databoxedge.v2020_09_01.models.AccountType
:param storage_account_id: Id of the storage account.
:type storage_account_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'alias': {'required': True},
'ssl_status': {'required': True},
'account_type': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'alias': {'key': 'properties.alias', 'type': 'str'},
'user_name': {'key': 'properties.userName', 'type': 'str'},
'account_key': {'key': 'properties.accountKey', 'type': 'AsymmetricEncryptedSecret'},
'connection_string': {'key': 'properties.connectionString', 'type': 'str'},
'ssl_status': {'key': 'properties.sslStatus', 'type': 'str'},
'blob_domain_name': {'key': 'properties.blobDomainName', 'type': 'str'},
'account_type': {'key': 'properties.accountType', 'type': 'str'},
'storage_account_id': {'key': 'properties.storageAccountId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StorageAccountCredential, self).__init__(**kwargs)
self.system_data = None
self.alias = kwargs['alias']
self.user_name = kwargs.get('user_name', None)
self.account_key = kwargs.get('account_key', None)
self.connection_string = kwargs.get('connection_string', None)
self.ssl_status = kwargs['ssl_status']
self.blob_domain_name = kwargs.get('blob_domain_name', None)
self.account_type = kwargs['account_type']
self.storage_account_id = kwargs.get('storage_account_id', None)
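
# Illustrative sketch, not part of the generated client: the minimal set of
# required properties for a storage account credential. ``encrypted_account_key``
# is assumed to be an AsymmetricEncryptedSecret built from the model defined
# earlier in this module; the alias and account type are placeholders.
def _example_storage_account_credential(encrypted_account_key):
    return StorageAccountCredential(
        alias="examplestorageaccount",
        ssl_status="Enabled",
        account_type="GeneralPurposeStorage",
        account_key=encrypted_account_key,
    )
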
class StorageAccountCredentialList(msrest.serialization.Model):
"""The collection of storage account credentials.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The value.
:vartype value: list[~azure.mgmt.databoxedge.v2020_09_01.models.StorageAccountCredential]
:ivar next_link: Link to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[StorageAccountCredential]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StorageAccountCredentialList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class StorageAccountList(msrest.serialization.Model):
"""Collection of all the Storage Accounts on the Data Box Edge/Gateway device.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of storageAccounts.
:vartype value: list[~azure.mgmt.databoxedge.v2020_09_01.models.StorageAccount]
:ivar next_link: Link to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[StorageAccount]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StorageAccountList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class SubscriptionRegisteredFeatures(msrest.serialization.Model):
"""SubscriptionRegisteredFeatures.
:param name:
:type name: str
:param state:
:type state: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SubscriptionRegisteredFeatures, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.state = kwargs.get('state', None)
class SymmetricKey(msrest.serialization.Model):
"""Symmetric key for authentication.
:param connection_string: Connection string based on the symmetric key.
:type connection_string: ~azure.mgmt.databoxedge.v2020_09_01.models.AsymmetricEncryptedSecret
"""
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'AsymmetricEncryptedSecret'},
}
def __init__(
self,
**kwargs
):
super(SymmetricKey, self).__init__(**kwargs)
self.connection_string = kwargs.get('connection_string', None)
class SystemData(msrest.serialization.Model):
"""Metadata pertaining to creation and last modification of the resource.
:param created_by: The identity that created the resource.
:type created_by: str
:param created_by_type: The type of identity that created the resource. Possible values
include: "User", "Application", "ManagedIdentity", "Key".
:type created_by_type: str or ~azure.mgmt.databoxedge.v2020_09_01.models.CreatedByType
:param created_at: The timestamp of resource creation (UTC).
:type created_at: ~datetime.datetime
:param last_modified_by: The identity that last modified the resource.
:type last_modified_by: str
:param last_modified_by_type: The type of identity that last modified the resource. Possible
values include: "User", "Application", "ManagedIdentity", "Key".
:type last_modified_by_type: str or ~azure.mgmt.databoxedge.v2020_09_01.models.CreatedByType
    :param last_modified_at: The timestamp of the resource's last modification (UTC).
:type last_modified_at: ~datetime.datetime
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_by_type': {'key': 'createdByType', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(SystemData, self).__init__(**kwargs)
self.created_by = kwargs.get('created_by', None)
self.created_by_type = kwargs.get('created_by_type', None)
self.created_at = kwargs.get('created_at', None)
self.last_modified_by = kwargs.get('last_modified_by', None)
self.last_modified_by_type = kwargs.get('last_modified_by_type', None)
self.last_modified_at = kwargs.get('last_modified_at', None)
class TrackingInfo(msrest.serialization.Model):
"""Tracking courier information.
:param serial_number: Serial number of the device being tracked.
:type serial_number: str
:param carrier_name: Name of the carrier used in the delivery.
:type carrier_name: str
:param tracking_id: Tracking ID of the shipment.
:type tracking_id: str
:param tracking_url: Tracking URL of the shipment.
:type tracking_url: str
"""
_attribute_map = {
'serial_number': {'key': 'serialNumber', 'type': 'str'},
'carrier_name': {'key': 'carrierName', 'type': 'str'},
'tracking_id': {'key': 'trackingId', 'type': 'str'},
'tracking_url': {'key': 'trackingUrl', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TrackingInfo, self).__init__(**kwargs)
self.serial_number = kwargs.get('serial_number', None)
self.carrier_name = kwargs.get('carrier_name', None)
self.tracking_id = kwargs.get('tracking_id', None)
self.tracking_url = kwargs.get('tracking_url', None)
class TriggerList(msrest.serialization.Model):
"""Collection of all trigger on the data box edge device.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of triggers.
:vartype value: list[~azure.mgmt.databoxedge.v2020_09_01.models.Trigger]
:ivar next_link: Link to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Trigger]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TriggerList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class UpdateDownloadProgress(msrest.serialization.Model):
"""Details about the download progress of update.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar download_phase: The download phase. Possible values include: "Unknown", "Initializing",
"Downloading", "Verifying".
:vartype download_phase: str or ~azure.mgmt.databoxedge.v2020_09_01.models.DownloadPhase
:ivar percent_complete: Percentage of completion.
:vartype percent_complete: int
:ivar total_bytes_to_download: Total bytes to download.
:vartype total_bytes_to_download: float
:ivar total_bytes_downloaded: Total bytes downloaded.
:vartype total_bytes_downloaded: float
:ivar number_of_updates_to_download: Number of updates to download.
:vartype number_of_updates_to_download: int
:ivar number_of_updates_downloaded: Number of updates downloaded.
:vartype number_of_updates_downloaded: int
"""
_validation = {
'download_phase': {'readonly': True},
'percent_complete': {'readonly': True},
'total_bytes_to_download': {'readonly': True},
'total_bytes_downloaded': {'readonly': True},
'number_of_updates_to_download': {'readonly': True},
'number_of_updates_downloaded': {'readonly': True},
}
_attribute_map = {
'download_phase': {'key': 'downloadPhase', 'type': 'str'},
'percent_complete': {'key': 'percentComplete', 'type': 'int'},
'total_bytes_to_download': {'key': 'totalBytesToDownload', 'type': 'float'},
'total_bytes_downloaded': {'key': 'totalBytesDownloaded', 'type': 'float'},
'number_of_updates_to_download': {'key': 'numberOfUpdatesToDownload', 'type': 'int'},
'number_of_updates_downloaded': {'key': 'numberOfUpdatesDownloaded', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(UpdateDownloadProgress, self).__init__(**kwargs)
self.download_phase = None
self.percent_complete = None
self.total_bytes_to_download = None
self.total_bytes_downloaded = None
self.number_of_updates_to_download = None
self.number_of_updates_downloaded = None
class UpdateInstallProgress(msrest.serialization.Model):
"""Progress details during installation of updates.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar percent_complete: Percentage completed.
:vartype percent_complete: int
:ivar number_of_updates_to_install: Number of updates to install.
:vartype number_of_updates_to_install: int
:ivar number_of_updates_installed: Number of updates installed.
:vartype number_of_updates_installed: int
"""
_validation = {
'percent_complete': {'readonly': True},
'number_of_updates_to_install': {'readonly': True},
'number_of_updates_installed': {'readonly': True},
}
_attribute_map = {
'percent_complete': {'key': 'percentComplete', 'type': 'int'},
'number_of_updates_to_install': {'key': 'numberOfUpdatesToInstall', 'type': 'int'},
'number_of_updates_installed': {'key': 'numberOfUpdatesInstalled', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(UpdateInstallProgress, self).__init__(**kwargs)
self.percent_complete = None
self.number_of_updates_to_install = None
self.number_of_updates_installed = None
class UpdateSummary(ARMBaseModel):
"""Details about ongoing updates and availability of updates on the device.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
    :param device_version_number: The current version of the device, in the format 1.2.17312.13.
:type device_version_number: str
:param friendly_device_version_name: The current version of the device in text format.
:type friendly_device_version_name: str
:param device_last_scanned_date_time: The last time when a scan was done on the device.
:type device_last_scanned_date_time: ~datetime.datetime
:param last_completed_scan_job_date_time: The time when the last scan job was completed
(success/cancelled/failed) on the appliance.
:type last_completed_scan_job_date_time: ~datetime.datetime
:ivar last_completed_download_job_date_time: The time when the last Download job was completed
(success/cancelled/failed) on the appliance.
:vartype last_completed_download_job_date_time: ~datetime.datetime
:ivar last_completed_install_job_date_time: The time when the last Install job was completed
(success/cancelled/failed) on the appliance.
:vartype last_completed_install_job_date_time: ~datetime.datetime
:ivar total_number_of_updates_available: The number of updates available for the current device
version as per the last device scan.
:vartype total_number_of_updates_available: int
:ivar total_number_of_updates_pending_download: The total number of items pending download.
:vartype total_number_of_updates_pending_download: int
:ivar total_number_of_updates_pending_install: The total number of items pending install.
:vartype total_number_of_updates_pending_install: int
:ivar reboot_behavior: Indicates if updates are available and at least one of the updates needs
a reboot. Possible values include: "NeverReboots", "RequiresReboot", "RequestReboot".
:vartype reboot_behavior: str or
~azure.mgmt.databoxedge.v2020_09_01.models.InstallRebootBehavior
:ivar ongoing_update_operation: The current update operation. Possible values include: "None",
"Scan", "Download", "Install".
:vartype ongoing_update_operation: str or
~azure.mgmt.databoxedge.v2020_09_01.models.UpdateOperation
:ivar in_progress_download_job_id: The job ID of the download job in progress.
:vartype in_progress_download_job_id: str
:ivar in_progress_install_job_id: The job ID of the install job in progress.
:vartype in_progress_install_job_id: str
:ivar in_progress_download_job_started_date_time: The time when the currently running download
(if any) started.
:vartype in_progress_download_job_started_date_time: ~datetime.datetime
:ivar in_progress_install_job_started_date_time: The time when the currently running install
(if any) started.
:vartype in_progress_install_job_started_date_time: ~datetime.datetime
:ivar update_titles: The list of updates available for install.
:vartype update_titles: list[str]
:ivar total_update_size_in_bytes: The total size of updates available for download in bytes.
:vartype total_update_size_in_bytes: float
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'last_completed_download_job_date_time': {'readonly': True},
'last_completed_install_job_date_time': {'readonly': True},
'total_number_of_updates_available': {'readonly': True},
'total_number_of_updates_pending_download': {'readonly': True},
'total_number_of_updates_pending_install': {'readonly': True},
'reboot_behavior': {'readonly': True},
'ongoing_update_operation': {'readonly': True},
'in_progress_download_job_id': {'readonly': True},
'in_progress_install_job_id': {'readonly': True},
'in_progress_download_job_started_date_time': {'readonly': True},
'in_progress_install_job_started_date_time': {'readonly': True},
'update_titles': {'readonly': True},
'total_update_size_in_bytes': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'device_version_number': {'key': 'properties.deviceVersionNumber', 'type': 'str'},
'friendly_device_version_name': {'key': 'properties.friendlyDeviceVersionName', 'type': 'str'},
'device_last_scanned_date_time': {'key': 'properties.deviceLastScannedDateTime', 'type': 'iso-8601'},
'last_completed_scan_job_date_time': {'key': 'properties.lastCompletedScanJobDateTime', 'type': 'iso-8601'},
'last_completed_download_job_date_time': {'key': 'properties.lastCompletedDownloadJobDateTime', 'type': 'iso-8601'},
'last_completed_install_job_date_time': {'key': 'properties.lastCompletedInstallJobDateTime', 'type': 'iso-8601'},
'total_number_of_updates_available': {'key': 'properties.totalNumberOfUpdatesAvailable', 'type': 'int'},
'total_number_of_updates_pending_download': {'key': 'properties.totalNumberOfUpdatesPendingDownload', 'type': 'int'},
'total_number_of_updates_pending_install': {'key': 'properties.totalNumberOfUpdatesPendingInstall', 'type': 'int'},
'reboot_behavior': {'key': 'properties.rebootBehavior', 'type': 'str'},
'ongoing_update_operation': {'key': 'properties.ongoingUpdateOperation', 'type': 'str'},
'in_progress_download_job_id': {'key': 'properties.inProgressDownloadJobId', 'type': 'str'},
'in_progress_install_job_id': {'key': 'properties.inProgressInstallJobId', 'type': 'str'},
'in_progress_download_job_started_date_time': {'key': 'properties.inProgressDownloadJobStartedDateTime', 'type': 'iso-8601'},
'in_progress_install_job_started_date_time': {'key': 'properties.inProgressInstallJobStartedDateTime', 'type': 'iso-8601'},
'update_titles': {'key': 'properties.updateTitles', 'type': '[str]'},
'total_update_size_in_bytes': {'key': 'properties.totalUpdateSizeInBytes', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
super(UpdateSummary, self).__init__(**kwargs)
self.device_version_number = kwargs.get('device_version_number', None)
self.friendly_device_version_name = kwargs.get('friendly_device_version_name', None)
self.device_last_scanned_date_time = kwargs.get('device_last_scanned_date_time', None)
self.last_completed_scan_job_date_time = kwargs.get('last_completed_scan_job_date_time', None)
self.last_completed_download_job_date_time = None
self.last_completed_install_job_date_time = None
self.total_number_of_updates_available = None
self.total_number_of_updates_pending_download = None
self.total_number_of_updates_pending_install = None
self.reboot_behavior = None
self.ongoing_update_operation = None
self.in_progress_download_job_id = None
self.in_progress_install_job_id = None
self.in_progress_download_job_started_date_time = None
self.in_progress_install_job_started_date_time = None
self.update_titles = None
self.total_update_size_in_bytes = None
class UploadCertificateRequest(msrest.serialization.Model):
"""The upload certificate request.
All required parameters must be populated in order to send to Azure.
:param authentication_type: The authentication type. Possible values include: "Invalid",
"AzureActiveDirectory".
:type authentication_type: str or ~azure.mgmt.databoxedge.v2020_09_01.models.AuthenticationType
:param certificate: Required. The base64 encoded certificate raw data.
:type certificate: str
"""
_validation = {
'certificate': {'required': True},
}
_attribute_map = {
'authentication_type': {'key': 'properties.authenticationType', 'type': 'str'},
'certificate': {'key': 'properties.certificate', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UploadCertificateRequest, self).__init__(**kwargs)
self.authentication_type = kwargs.get('authentication_type', None)
self.certificate = kwargs['certificate']
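
# Illustrative sketch, not part of the generated client: the certificate field
# expects the base64 encoded raw certificate data, so a .cer/.der file read
# from disk can be encoded before building the request. The file path is a
# placeholder.
def _example_upload_certificate_request(cer_path="device-registration.cer"):
    import base64

    with open(cer_path, "rb") as cert_file:
        raw = cert_file.read()
    return UploadCertificateRequest(
        certificate=base64.b64encode(raw).decode("utf-8"),
        authentication_type="AzureActiveDirectory",
    )
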
class UploadCertificateResponse(msrest.serialization.Model):
"""The upload registration certificate response.
Variables are only populated by the server, and will be ignored when sending a request.
:param auth_type: Specifies authentication type. Possible values include: "Invalid",
"AzureActiveDirectory".
:type auth_type: str or ~azure.mgmt.databoxedge.v2020_09_01.models.AuthenticationType
:ivar resource_id: The resource ID of the Data Box Edge/Gateway device.
:vartype resource_id: str
:ivar aad_authority: Azure Active Directory tenant authority.
:vartype aad_authority: str
:ivar aad_tenant_id: Azure Active Directory tenant ID.
:vartype aad_tenant_id: str
:ivar service_principal_client_id: Azure Active Directory service principal client ID.
:vartype service_principal_client_id: str
:ivar service_principal_object_id: Azure Active Directory service principal object ID.
:vartype service_principal_object_id: str
:ivar azure_management_endpoint_audience: The azure management endpoint audience.
:vartype azure_management_endpoint_audience: str
:ivar aad_audience: Identifier of the target resource that is the recipient of the requested
token.
:vartype aad_audience: str
"""
_validation = {
'resource_id': {'readonly': True},
'aad_authority': {'readonly': True},
'aad_tenant_id': {'readonly': True},
'service_principal_client_id': {'readonly': True},
'service_principal_object_id': {'readonly': True},
'azure_management_endpoint_audience': {'readonly': True},
'aad_audience': {'readonly': True},
}
_attribute_map = {
'auth_type': {'key': 'authType', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'aad_authority': {'key': 'aadAuthority', 'type': 'str'},
'aad_tenant_id': {'key': 'aadTenantId', 'type': 'str'},
'service_principal_client_id': {'key': 'servicePrincipalClientId', 'type': 'str'},
'service_principal_object_id': {'key': 'servicePrincipalObjectId', 'type': 'str'},
'azure_management_endpoint_audience': {'key': 'azureManagementEndpointAudience', 'type': 'str'},
'aad_audience': {'key': 'aadAudience', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UploadCertificateResponse, self).__init__(**kwargs)
self.auth_type = kwargs.get('auth_type', None)
self.resource_id = None
self.aad_authority = None
self.aad_tenant_id = None
self.service_principal_client_id = None
self.service_principal_object_id = None
self.azure_management_endpoint_audience = None
self.aad_audience = None
class User(ARMBaseModel):
"""Represents a user who has access to one or more shares on the Data Box Edge/Gateway device.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:ivar system_data: User in DataBoxEdge Resource.
:vartype system_data: ~azure.mgmt.databoxedge.v2020_09_01.models.SystemData
:param encrypted_password: The password details.
:type encrypted_password: ~azure.mgmt.databoxedge.v2020_09_01.models.AsymmetricEncryptedSecret
:ivar share_access_rights: List of shares that the user has rights on. This field should not be
specified during user creation.
:vartype share_access_rights: list[~azure.mgmt.databoxedge.v2020_09_01.models.ShareAccessRight]
:param user_type: Type of the user. Possible values include: "Share", "LocalManagement", "ARM".
:type user_type: str or ~azure.mgmt.databoxedge.v2020_09_01.models.UserType
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'share_access_rights': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'encrypted_password': {'key': 'properties.encryptedPassword', 'type': 'AsymmetricEncryptedSecret'},
'share_access_rights': {'key': 'properties.shareAccessRights', 'type': '[ShareAccessRight]'},
'user_type': {'key': 'properties.userType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(User, self).__init__(**kwargs)
self.system_data = None
self.encrypted_password = kwargs.get('encrypted_password', None)
self.share_access_rights = None
self.user_type = kwargs.get('user_type', None)
class UserAccessRight(msrest.serialization.Model):
"""The mapping between a particular user and the access type on the SMB share.
All required parameters must be populated in order to send to Azure.
:param user_id: Required. User ID (already existing in the device).
:type user_id: str
:param access_type: Required. Type of access to be allowed for the user. Possible values
include: "Change", "Read", "Custom".
:type access_type: str or ~azure.mgmt.databoxedge.v2020_09_01.models.ShareAccessType
"""
_validation = {
'user_id': {'required': True},
'access_type': {'required': True},
}
_attribute_map = {
'user_id': {'key': 'userId', 'type': 'str'},
'access_type': {'key': 'accessType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UserAccessRight, self).__init__(**kwargs)
self.user_id = kwargs['user_id']
self.access_type = kwargs['access_type']
class UserList(msrest.serialization.Model):
"""Collection of users.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of users.
:vartype value: list[~azure.mgmt.databoxedge.v2020_09_01.models.User]
:ivar next_link: Link to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[User]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UserList, self).__init__(**kwargs)
self.value = None
self.next_link = None
|
Azure/azure-sdk-for-python
|
sdk/databoxedge/azure-mgmt-databoxedge/azure/mgmt/databoxedge/v2020_09_01/models/_models.py
|
Python
|
mit
| 191,540
|
[
"ASE"
] |
3a7bf745eebf0b32f19da55b24f3a83dd3872be3a10e101678df2dd6f1338234
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import encounter
from .fhirdate import FHIRDate
class EncounterTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("Encounter", js["resourceType"])
return encounter.Encounter(js)
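    # Illustrative note (not part of the generated tests): fixtures are resolved
    # relative to FHIR_UNITTEST_DATADIR, so a typical invocation looks something
    # like the following (data path assumed):
    #
    #     FHIR_UNITTEST_DATADIR=/path/to/fhir/examples \
    #         python -m unittest rdr_service.lib_fhir.fhirclient_4_0_0.models.encounter_tests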
def testEncounter1(self):
inst = self.instantiate_from("encounter-example-home.json")
        self.assertIsNotNone(inst, "Must have instantiated an Encounter instance")
self.implEncounter1(inst)
js = inst.as_json()
self.assertEqual("Encounter", js["resourceType"])
inst2 = encounter.Encounter(js)
self.implEncounter1(inst2)
def implEncounter1(self, inst):
self.assertEqual(inst.class_fhir.code, "HH")
self.assertEqual(inst.class_fhir.display, "home health")
self.assertEqual(inst.class_fhir.system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
self.assertEqual(inst.contained[0].id, "home")
self.assertEqual(inst.id, "home")
self.assertEqual(inst.location[0].period.end.date, FHIRDate("2015-01-17T16:30:00+10:00").date)
self.assertEqual(inst.location[0].period.end.as_json(), "2015-01-17T16:30:00+10:00")
self.assertEqual(inst.location[0].period.start.date, FHIRDate("2015-01-17T16:00:00+10:00").date)
self.assertEqual(inst.location[0].period.start.as_json(), "2015-01-17T16:00:00+10:00")
self.assertEqual(inst.location[0].status, "completed")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.participant[0].period.end.date, FHIRDate("2015-01-17T16:30:00+10:00").date)
self.assertEqual(inst.participant[0].period.end.as_json(), "2015-01-17T16:30:00+10:00")
self.assertEqual(inst.participant[0].period.start.date, FHIRDate("2015-01-17T16:00:00+10:00").date)
self.assertEqual(inst.participant[0].period.start.as_json(), "2015-01-17T16:00:00+10:00")
self.assertEqual(inst.period.end.date, FHIRDate("2015-01-17T16:30:00+10:00").date)
self.assertEqual(inst.period.end.as_json(), "2015-01-17T16:30:00+10:00")
self.assertEqual(inst.period.start.date, FHIRDate("2015-01-17T16:00:00+10:00").date)
self.assertEqual(inst.period.start.as_json(), "2015-01-17T16:00:00+10:00")
self.assertEqual(inst.status, "finished")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Encounter with patient @example who is at home</div>")
self.assertEqual(inst.text.status, "generated")
def testEncounter2(self):
inst = self.instantiate_from("encounter-example-f201-20130404.json")
        self.assertIsNotNone(inst, "Must have instantiated an Encounter instance")
self.implEncounter2(inst)
js = inst.as_json()
self.assertEqual("Encounter", js["resourceType"])
inst2 = encounter.Encounter(js)
self.implEncounter2(inst2)
def implEncounter2(self, inst):
self.assertEqual(inst.class_fhir.code, "AMB")
self.assertEqual(inst.class_fhir.display, "ambulatory")
self.assertEqual(inst.class_fhir.system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
self.assertEqual(inst.id, "f201")
self.assertEqual(inst.identifier[0].use, "temp")
self.assertEqual(inst.identifier[0].value, "Encounter_Roel_20130404")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.priority.coding[0].code, "17621005")
self.assertEqual(inst.priority.coding[0].display, "Normal")
self.assertEqual(inst.priority.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.reasonCode[0].text, "The patient had fever peaks over the last couple of days. He is worried about these peaks.")
self.assertEqual(inst.status, "finished")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type[0].coding[0].code, "11429006")
self.assertEqual(inst.type[0].coding[0].display, "Consultation")
self.assertEqual(inst.type[0].coding[0].system, "http://snomed.info/sct")
def testEncounter3(self):
inst = self.instantiate_from("encounter-example-f003-abscess.json")
        self.assertIsNotNone(inst, "Must have instantiated an Encounter instance")
self.implEncounter3(inst)
js = inst.as_json()
self.assertEqual("Encounter", js["resourceType"])
inst2 = encounter.Encounter(js)
self.implEncounter3(inst2)
def implEncounter3(self, inst):
self.assertEqual(inst.class_fhir.code, "AMB")
self.assertEqual(inst.class_fhir.display, "ambulatory")
self.assertEqual(inst.class_fhir.system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
self.assertEqual(inst.hospitalization.admitSource.coding[0].code, "305956004")
self.assertEqual(inst.hospitalization.admitSource.coding[0].display, "Referral by physician")
self.assertEqual(inst.hospitalization.admitSource.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.hospitalization.dischargeDisposition.coding[0].code, "306689006")
self.assertEqual(inst.hospitalization.dischargeDisposition.coding[0].display, "Discharge to home")
self.assertEqual(inst.hospitalization.dischargeDisposition.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.hospitalization.preAdmissionIdentifier.system, "http://www.bmc.nl/zorgportal/identifiers/pre-admissions")
self.assertEqual(inst.hospitalization.preAdmissionIdentifier.use, "official")
self.assertEqual(inst.hospitalization.preAdmissionIdentifier.value, "93042")
self.assertEqual(inst.id, "f003")
self.assertEqual(inst.identifier[0].system, "http://www.bmc.nl/zorgportal/identifiers/encounters")
self.assertEqual(inst.identifier[0].use, "official")
self.assertEqual(inst.identifier[0].value, "v6751")
self.assertEqual(inst.length.code, "min")
self.assertEqual(inst.length.system, "http://unitsofmeasure.org")
self.assertEqual(inst.length.unit, "min")
self.assertEqual(inst.length.value, 90)
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.priority.coding[0].code, "103391001")
self.assertEqual(inst.priority.coding[0].display, "Non-urgent ear, nose and throat admission")
self.assertEqual(inst.priority.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.reasonCode[0].coding[0].code, "18099001")
self.assertEqual(inst.reasonCode[0].coding[0].display, "Retropharyngeal abscess")
self.assertEqual(inst.reasonCode[0].coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.status, "finished")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type[0].coding[0].code, "270427003")
self.assertEqual(inst.type[0].coding[0].display, "Patient-initiated encounter")
self.assertEqual(inst.type[0].coding[0].system, "http://snomed.info/sct")
def testEncounter4(self):
inst = self.instantiate_from("encounter-example.json")
        self.assertIsNotNone(inst, "Must have instantiated an Encounter instance")
self.implEncounter4(inst)
js = inst.as_json()
self.assertEqual("Encounter", js["resourceType"])
inst2 = encounter.Encounter(js)
self.implEncounter4(inst2)
def implEncounter4(self, inst):
self.assertEqual(inst.class_fhir.code, "IMP")
self.assertEqual(inst.class_fhir.display, "inpatient encounter")
self.assertEqual(inst.class_fhir.system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
self.assertEqual(inst.id, "example")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.status, "in-progress")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Encounter with patient @example</div>")
self.assertEqual(inst.text.status, "generated")
def testEncounter5(self):
inst = self.instantiate_from("encounter-example-f002-lung.json")
        self.assertIsNotNone(inst, "Must have instantiated an Encounter instance")
self.implEncounter5(inst)
js = inst.as_json()
self.assertEqual("Encounter", js["resourceType"])
inst2 = encounter.Encounter(js)
self.implEncounter5(inst2)
def implEncounter5(self, inst):
self.assertEqual(inst.class_fhir.code, "AMB")
self.assertEqual(inst.class_fhir.display, "ambulatory")
self.assertEqual(inst.class_fhir.system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
self.assertEqual(inst.hospitalization.admitSource.coding[0].code, "305997006")
self.assertEqual(inst.hospitalization.admitSource.coding[0].display, "Referral by radiologist")
self.assertEqual(inst.hospitalization.admitSource.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.hospitalization.dischargeDisposition.coding[0].code, "306689006")
self.assertEqual(inst.hospitalization.dischargeDisposition.coding[0].display, "Discharge to home")
self.assertEqual(inst.hospitalization.dischargeDisposition.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.hospitalization.preAdmissionIdentifier.system, "http://www.bmc.nl/zorgportal/identifiers/pre-admissions")
self.assertEqual(inst.hospitalization.preAdmissionIdentifier.use, "official")
self.assertEqual(inst.hospitalization.preAdmissionIdentifier.value, "98682")
self.assertEqual(inst.id, "f002")
self.assertEqual(inst.identifier[0].system, "http://www.bmc.nl/zorgportal/identifiers/encounters")
self.assertEqual(inst.identifier[0].use, "official")
self.assertEqual(inst.identifier[0].value, "v3251")
self.assertEqual(inst.length.code, "min")
self.assertEqual(inst.length.system, "http://unitsofmeasure.org")
self.assertEqual(inst.length.unit, "min")
self.assertEqual(inst.length.value, 140)
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.priority.coding[0].code, "103391001")
self.assertEqual(inst.priority.coding[0].display, "Urgent")
self.assertEqual(inst.priority.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.reasonCode[0].coding[0].code, "34068001")
self.assertEqual(inst.reasonCode[0].coding[0].display, "Partial lobectomy of lung")
self.assertEqual(inst.reasonCode[0].coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.status, "finished")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type[0].coding[0].code, "270427003")
self.assertEqual(inst.type[0].coding[0].display, "Patient-initiated encounter")
self.assertEqual(inst.type[0].coding[0].system, "http://snomed.info/sct")
def testEncounter6(self):
inst = self.instantiate_from("encounter-example-f203-20130311.json")
        self.assertIsNotNone(inst, "Must have instantiated an Encounter instance")
self.implEncounter6(inst)
js = inst.as_json()
self.assertEqual("Encounter", js["resourceType"])
inst2 = encounter.Encounter(js)
self.implEncounter6(inst2)
def implEncounter6(self, inst):
self.assertEqual(inst.class_fhir.code, "IMP")
self.assertEqual(inst.class_fhir.display, "inpatient encounter")
self.assertEqual(inst.class_fhir.system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
self.assertEqual(inst.diagnosis[0].rank, 1)
self.assertEqual(inst.diagnosis[0].use.coding[0].code, "AD")
self.assertEqual(inst.diagnosis[0].use.coding[0].display, "Admission diagnosis")
self.assertEqual(inst.diagnosis[0].use.coding[0].system, "http://terminology.hl7.org/CodeSystem/diagnosis-role")
self.assertEqual(inst.diagnosis[1].use.coding[0].code, "DD")
self.assertEqual(inst.diagnosis[1].use.coding[0].display, "Discharge diagnosis")
self.assertEqual(inst.diagnosis[1].use.coding[0].system, "http://terminology.hl7.org/CodeSystem/diagnosis-role")
self.assertEqual(inst.hospitalization.admitSource.coding[0].code, "309902002")
self.assertEqual(inst.hospitalization.admitSource.coding[0].display, "Clinical Oncology Department")
self.assertEqual(inst.hospitalization.admitSource.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.hospitalization.dietPreference[0].coding[0].code, "276026009")
self.assertEqual(inst.hospitalization.dietPreference[0].coding[0].display, "Fluid balance regulation")
self.assertEqual(inst.hospitalization.dietPreference[0].coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.hospitalization.reAdmission.coding[0].display, "readmitted")
self.assertEqual(inst.hospitalization.specialArrangement[0].coding[0].code, "wheel")
self.assertEqual(inst.hospitalization.specialArrangement[0].coding[0].display, "Wheelchair")
self.assertEqual(inst.hospitalization.specialArrangement[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/encounter-special-arrangements")
self.assertEqual(inst.hospitalization.specialCourtesy[0].coding[0].code, "NRM")
self.assertEqual(inst.hospitalization.specialCourtesy[0].coding[0].display, "normal courtesy")
self.assertEqual(inst.hospitalization.specialCourtesy[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-EncounterSpecialCourtesy")
self.assertEqual(inst.id, "f203")
self.assertEqual(inst.identifier[0].use, "temp")
self.assertEqual(inst.identifier[0].value, "Encounter_Roel_20130311")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.participant[0].type[0].coding[0].code, "PART")
self.assertEqual(inst.participant[0].type[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ParticipationType")
self.assertEqual(inst.period.end.date, FHIRDate("2013-03-20").date)
self.assertEqual(inst.period.end.as_json(), "2013-03-20")
self.assertEqual(inst.period.start.date, FHIRDate("2013-03-11").date)
self.assertEqual(inst.period.start.as_json(), "2013-03-11")
self.assertEqual(inst.priority.coding[0].code, "394849002")
self.assertEqual(inst.priority.coding[0].display, "High priority")
self.assertEqual(inst.priority.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.reasonCode[0].text, "The patient seems to suffer from bilateral pneumonia and renal insufficiency, most likely due to chemotherapy.")
self.assertEqual(inst.status, "finished")
self.assertEqual(inst.statusHistory[0].period.start.date, FHIRDate("2013-03-08").date)
self.assertEqual(inst.statusHistory[0].period.start.as_json(), "2013-03-08")
self.assertEqual(inst.statusHistory[0].status, "arrived")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type[0].coding[0].code, "183807002")
self.assertEqual(inst.type[0].coding[0].display, "Inpatient stay for nine days")
self.assertEqual(inst.type[0].coding[0].system, "http://snomed.info/sct")
def testEncounter7(self):
inst = self.instantiate_from("encounter-example-xcda.json")
        self.assertIsNotNone(inst, "Must have instantiated an Encounter instance")
self.implEncounter7(inst)
js = inst.as_json()
self.assertEqual("Encounter", js["resourceType"])
inst2 = encounter.Encounter(js)
self.implEncounter7(inst2)
def implEncounter7(self, inst):
self.assertEqual(inst.class_fhir.code, "AMB")
self.assertEqual(inst.class_fhir.display, "ambulatory")
self.assertEqual(inst.class_fhir.system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
self.assertEqual(inst.id, "xcda")
self.assertEqual(inst.identifier[0].system, "http://healthcare.example.org/identifiers/enocunter")
self.assertEqual(inst.identifier[0].use, "official")
self.assertEqual(inst.identifier[0].value, "1234213.52345873")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.reasonCode[0].coding[0].code, "T-D8200")
self.assertEqual(inst.reasonCode[0].coding[0].display, "Arm")
self.assertEqual(inst.reasonCode[0].coding[0].system, "http://ihe.net/xds/connectathon/eventCodes")
self.assertEqual(inst.status, "finished")
self.assertEqual(inst.text.status, "generated")
def testEncounter8(self):
inst = self.instantiate_from("encounter-example-f202-20130128.json")
        self.assertIsNotNone(inst, "Must have instantiated an Encounter instance")
self.implEncounter8(inst)
js = inst.as_json()
self.assertEqual("Encounter", js["resourceType"])
inst2 = encounter.Encounter(js)
self.implEncounter8(inst2)
def implEncounter8(self, inst):
self.assertEqual(inst.class_fhir.code, "AMB")
self.assertEqual(inst.class_fhir.display, "ambulatory")
self.assertEqual(inst.class_fhir.system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
self.assertEqual(inst.diagnosis[0].rank, 2)
self.assertEqual(inst.diagnosis[0].use.coding[0].code, "AD")
self.assertEqual(inst.diagnosis[0].use.coding[0].display, "Admission diagnosis")
self.assertEqual(inst.diagnosis[0].use.coding[0].system, "http://terminology.hl7.org/CodeSystem/diagnosis-role")
self.assertEqual(inst.diagnosis[1].rank, 1)
self.assertEqual(inst.diagnosis[1].use.coding[0].code, "CC")
self.assertEqual(inst.diagnosis[1].use.coding[0].display, "Chief complaint")
self.assertEqual(inst.diagnosis[1].use.coding[0].system, "http://terminology.hl7.org/CodeSystem/diagnosis-role")
self.assertEqual(inst.id, "f202")
self.assertEqual(inst.identifier[0].use, "temp")
self.assertEqual(inst.identifier[0].value, "Encounter_Roel_20130128")
self.assertEqual(inst.length.code, "min")
self.assertEqual(inst.length.system, "http://unitsofmeasure.org")
self.assertEqual(inst.length.unit, "minutes")
self.assertEqual(inst.length.value, 56)
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.priority.coding[0].code, "103391001")
self.assertEqual(inst.priority.coding[0].display, "Urgent")
self.assertEqual(inst.priority.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.reasonCode[0].text, "The patient is treated for a tumor.")
self.assertEqual(inst.status, "finished")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type[0].coding[0].code, "367336001")
self.assertEqual(inst.type[0].coding[0].display, "Chemotherapy")
self.assertEqual(inst.type[0].coding[0].system, "http://snomed.info/sct")
def testEncounter9(self):
inst = self.instantiate_from("encounter-example-emerg.json")
        self.assertIsNotNone(inst, "Must have instantiated an Encounter instance")
self.implEncounter9(inst)
js = inst.as_json()
self.assertEqual("Encounter", js["resourceType"])
inst2 = encounter.Encounter(js)
self.implEncounter9(inst2)
def implEncounter9(self, inst):
self.assertEqual(inst.classHistory[0].class_fhir.code, "EMER")
self.assertEqual(inst.classHistory[0].class_fhir.display, "emergency")
self.assertEqual(inst.classHistory[0].class_fhir.system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
self.assertEqual(inst.classHistory[0].period.end.date, FHIRDate("2017-02-01T09:27:00+10:00").date)
self.assertEqual(inst.classHistory[0].period.end.as_json(), "2017-02-01T09:27:00+10:00")
self.assertEqual(inst.classHistory[0].period.start.date, FHIRDate("2017-02-01T07:15:00+10:00").date)
self.assertEqual(inst.classHistory[0].period.start.as_json(), "2017-02-01T07:15:00+10:00")
self.assertEqual(inst.classHistory[1].class_fhir.code, "IMP")
self.assertEqual(inst.classHistory[1].class_fhir.display, "inpatient encounter")
self.assertEqual(inst.classHistory[1].class_fhir.system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
self.assertEqual(inst.classHistory[1].period.start.date, FHIRDate("2017-02-01T09:27:00+10:00").date)
self.assertEqual(inst.classHistory[1].period.start.as_json(), "2017-02-01T09:27:00+10:00")
self.assertEqual(inst.class_fhir.code, "IMP")
self.assertEqual(inst.class_fhir.display, "inpatient encounter")
self.assertEqual(inst.class_fhir.system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
self.assertEqual(inst.hospitalization.admitSource.coding[0].code, "emd")
self.assertEqual(inst.hospitalization.admitSource.coding[0].display, "From accident/emergency department")
self.assertEqual(inst.hospitalization.admitSource.coding[0].system, "http://terminology.hl7.org/CodeSystem/admit-source")
self.assertEqual(inst.id, "emerg")
self.assertEqual(inst.location[0].period.end.date, FHIRDate("2017-02-01T08:45:00+10:00").date)
self.assertEqual(inst.location[0].period.end.as_json(), "2017-02-01T08:45:00+10:00")
self.assertEqual(inst.location[0].period.start.date, FHIRDate("2017-02-01T07:15:00+10:00").date)
self.assertEqual(inst.location[0].period.start.as_json(), "2017-02-01T07:15:00+10:00")
self.assertEqual(inst.location[0].status, "active")
self.assertEqual(inst.location[1].period.end.date, FHIRDate("2017-02-01T09:27:00+10:00").date)
self.assertEqual(inst.location[1].period.end.as_json(), "2017-02-01T09:27:00+10:00")
self.assertEqual(inst.location[1].period.start.date, FHIRDate("2017-02-01T08:45:00+10:00").date)
self.assertEqual(inst.location[1].period.start.as_json(), "2017-02-01T08:45:00+10:00")
self.assertEqual(inst.location[1].status, "active")
self.assertEqual(inst.location[2].period.end.date, FHIRDate("2017-02-01T12:15:00+10:00").date)
self.assertEqual(inst.location[2].period.end.as_json(), "2017-02-01T12:15:00+10:00")
self.assertEqual(inst.location[2].period.start.date, FHIRDate("2017-02-01T09:27:00+10:00").date)
self.assertEqual(inst.location[2].period.start.as_json(), "2017-02-01T09:27:00+10:00")
self.assertEqual(inst.location[2].status, "active")
self.assertEqual(inst.location[3].period.end.date, FHIRDate("2017-02-01T12:45:00+10:00").date)
self.assertEqual(inst.location[3].period.end.as_json(), "2017-02-01T12:45:00+10:00")
self.assertEqual(inst.location[3].period.start.date, FHIRDate("2017-02-01T12:15:00+10:00").date)
self.assertEqual(inst.location[3].period.start.as_json(), "2017-02-01T12:15:00+10:00")
self.assertEqual(inst.location[3].status, "reserved")
self.assertEqual(inst.location[4].period.start.date, FHIRDate("2017-02-01T12:45:00+10:00").date)
self.assertEqual(inst.location[4].period.start.as_json(), "2017-02-01T12:45:00+10:00")
self.assertEqual(inst.location[4].status, "active")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.period.start.date, FHIRDate("2017-02-01T07:15:00+10:00").date)
self.assertEqual(inst.period.start.as_json(), "2017-02-01T07:15:00+10:00")
self.assertEqual(inst.status, "in-progress")
self.assertEqual(inst.statusHistory[0].period.end.date, FHIRDate("2017-02-01T07:35:00+10:00").date)
self.assertEqual(inst.statusHistory[0].period.end.as_json(), "2017-02-01T07:35:00+10:00")
self.assertEqual(inst.statusHistory[0].period.start.date, FHIRDate("2017-02-01T07:15:00+10:00").date)
self.assertEqual(inst.statusHistory[0].period.start.as_json(), "2017-02-01T07:15:00+10:00")
self.assertEqual(inst.statusHistory[0].status, "arrived")
self.assertEqual(inst.statusHistory[1].period.end.date, FHIRDate("2017-02-01T08:45:00+10:00").date)
self.assertEqual(inst.statusHistory[1].period.end.as_json(), "2017-02-01T08:45:00+10:00")
self.assertEqual(inst.statusHistory[1].period.start.date, FHIRDate("2017-02-01T07:35:00+10:00").date)
self.assertEqual(inst.statusHistory[1].period.start.as_json(), "2017-02-01T07:35:00+10:00")
self.assertEqual(inst.statusHistory[1].status, "triaged")
self.assertEqual(inst.statusHistory[2].period.end.date, FHIRDate("2017-02-01T12:15:00+10:00").date)
self.assertEqual(inst.statusHistory[2].period.end.as_json(), "2017-02-01T12:15:00+10:00")
self.assertEqual(inst.statusHistory[2].period.start.date, FHIRDate("2017-02-01T08:45:00+10:00").date)
self.assertEqual(inst.statusHistory[2].period.start.as_json(), "2017-02-01T08:45:00+10:00")
self.assertEqual(inst.statusHistory[2].status, "in-progress")
self.assertEqual(inst.statusHistory[3].period.end.date, FHIRDate("2017-02-01T12:45:00+10:00").date)
self.assertEqual(inst.statusHistory[3].period.end.as_json(), "2017-02-01T12:45:00+10:00")
self.assertEqual(inst.statusHistory[3].period.start.date, FHIRDate("2017-02-01T12:15:00+10:00").date)
self.assertEqual(inst.statusHistory[3].period.start.as_json(), "2017-02-01T12:15:00+10:00")
self.assertEqual(inst.statusHistory[3].status, "onleave")
self.assertEqual(inst.statusHistory[4].period.start.date, FHIRDate("2017-02-01T12:45:00+10:00").date)
self.assertEqual(inst.statusHistory[4].period.start.as_json(), "2017-02-01T12:45:00+10:00")
self.assertEqual(inst.statusHistory[4].status, "in-progress")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Emergency visit that escalated into inpatient patient @example</div>")
self.assertEqual(inst.text.status, "generated")
def testEncounter10(self):
inst = self.instantiate_from("encounter-example-f001-heart.json")
        self.assertIsNotNone(inst, "Must have instantiated an Encounter instance")
self.implEncounter10(inst)
js = inst.as_json()
self.assertEqual("Encounter", js["resourceType"])
inst2 = encounter.Encounter(js)
self.implEncounter10(inst2)
def implEncounter10(self, inst):
self.assertEqual(inst.class_fhir.code, "AMB")
self.assertEqual(inst.class_fhir.display, "ambulatory")
self.assertEqual(inst.class_fhir.system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
self.assertEqual(inst.hospitalization.admitSource.coding[0].code, "305956004")
self.assertEqual(inst.hospitalization.admitSource.coding[0].display, "Referral by physician")
self.assertEqual(inst.hospitalization.admitSource.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.hospitalization.dischargeDisposition.coding[0].code, "306689006")
self.assertEqual(inst.hospitalization.dischargeDisposition.coding[0].display, "Discharge to home")
self.assertEqual(inst.hospitalization.dischargeDisposition.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.hospitalization.preAdmissionIdentifier.system, "http://www.amc.nl/zorgportal/identifiers/pre-admissions")
self.assertEqual(inst.hospitalization.preAdmissionIdentifier.use, "official")
self.assertEqual(inst.hospitalization.preAdmissionIdentifier.value, "93042")
self.assertEqual(inst.id, "f001")
self.assertEqual(inst.identifier[0].system, "http://www.amc.nl/zorgportal/identifiers/visits")
self.assertEqual(inst.identifier[0].use, "official")
self.assertEqual(inst.identifier[0].value, "v1451")
self.assertEqual(inst.length.code, "min")
self.assertEqual(inst.length.system, "http://unitsofmeasure.org")
self.assertEqual(inst.length.unit, "min")
self.assertEqual(inst.length.value, 140)
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.priority.coding[0].code, "310361003")
self.assertEqual(inst.priority.coding[0].display, "Non-urgent cardiological admission")
self.assertEqual(inst.priority.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.reasonCode[0].coding[0].code, "34068001")
self.assertEqual(inst.reasonCode[0].coding[0].display, "Heart valve replacement")
self.assertEqual(inst.reasonCode[0].coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.status, "finished")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type[0].coding[0].code, "270427003")
self.assertEqual(inst.type[0].coding[0].display, "Patient-initiated encounter")
self.assertEqual(inst.type[0].coding[0].system, "http://snomed.info/sct")
|
all-of-us/raw-data-repository
|
rdr_service/lib_fhir/fhirclient_4_0_0/models/encounter_tests.py
|
Python
|
bsd-3-clause
| 31,172
|
[
"VisIt"
] |
952e9cef1bd365ee3fb9f820b6b85d87050dba88a9e2b687b39d6305272f3a63
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 25 13:08:19 2015
@author: jgimenez
"""
from PyQt4 import QtGui, QtCore
from postpro_ui import Ui_postproUI
from PyFoam.RunDictionary.ParsedParameterFile import ParsedParameterFile
from PyFoam.RunDictionary.BoundaryDict import BoundaryDict
import os
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from utils import *
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class postproUI(QtGui.QScrollArea, Ui_postproUI):
def __init__(self, parent=None, f=QtCore.Qt.WindowFlags()):
QtGui.QScrollArea.__init__(self, parent)
self.setupUi(self)
apps = {}
apps['Vorticity'] = 'vorticity'
apps['Mach Number'] = 'Mach'
apps['Courant Number'] = 'Co'
apps['Pecklet Number'] = 'Pe'
apps['Stream Function'] = 'streamFunction'
apps['Enstrophy'] = 'enstrophy'
apps['Q Criterion Number'] = 'Q'
apps['y Plus RAS'] = 'yPlusRAS'
apps['y Plus LES'] = 'yPlusLES'
apps['Wall Velocity Gradient'] = 'wallGradU'
apps['Average'] = 'patchAverage'
apps['Integral'] = 'patchIntegrate'
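# Illustrative note (not in the original source): each entry maps a combo-box
# label to the OpenFOAM post-processing utility that calculate1/2/3 invoke. For
# example, selecting "Vorticity" on a serial case at /tmp/case with the
# "Latest Time" option ends up running a shell command along the lines of
# (paths assumed):
#
#     vorticity -case /tmp/case -latestTime > /tmp/case/field_calculation.log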
class postproWidget(postproUI):
def __init__(self,currentFolder):
postproUI.__init__(self)
def setCurrentFolder(self, currentFolder):
self.currentFolder = currentFolder
#filling data
self.nproc = self.window().nproc
[timedir,self.fields,currtime] = currentFields(str(self.currentFolder),nproc=self.nproc)
self.field_3.clear()
self.field_3.addItems(self.fields)
self.boundaries = BoundaryDict(str(self.currentFolder))
self.bou_3.clear()
self.bou_3.addItems(self.boundaries.patches())
def openParaview(self):
os.system('paraFoam -builtin -case %s &'%self.currentFolder)
def exportData(self):
if self.nproc>1:
w = QtGui.QMessageBox(QtGui.QMessageBox.Information, "Error", "Data only can be exported in reconstructed cases")
w.exec_()
return
tt = ''
if self.time_4.currentText()=='Latest Time':
tt = '-latestTime'
opt = str(self.comboBox.currentText())
filename = '%s/export.log'%self.currentFolder
self.window().newLogTab('Export',filename)
if opt=='VTK':
action = 'foamToVTK -case %s %s > %s &' %(self.currentFolder,tt,filename)
elif opt=='Fluent':
action = 'foamMeshToFluent -case %s &' %(self.currentFolder)
os.system(action)
action = 'cp %s/caseDicts/foamDataToFluentDict %s/system/foamDataToFluentDict'%(os.path.dirname(os.path.realpath(__file__)),self.currentFolder)
os.system(action)
parsedData = ParsedParameterFile('%s/system/foamDataToFluentDict'%self.currentFolder,createZipped=False)
ii = 10
for ifield in self.fields:
if ifield not in parsedData.getValueDict().keys():
parsedData[ifield] = ii
ii = ii + 1
action = 'foamDataToFluent -case %s %s > %s &' %(self.currentFolder,tt,filename)
elif opt=='Ensight':
action = 'foamToEnsight -case %s %s > %s &' %(self.currentFolder,tt,filename)
os.system(action)
return
def calculate1(self):
tt = ''
if self.time_1.currentText()=='Latest Time':
tt = '-latestTime'
filename = '%s/field_calculation.log'%self.currentFolder
self.window().newLogTab('Postpro Field',filename)
if self.nproc<=1:
action = '%s -case %s %s > %s'%(apps[str(self.field_1.currentText())],self.currentFolder, tt, filename)
else:
action = 'mpirun -np %s %s -case %s %s -parallel > %s'%(str(self.nproc), apps[str(self.field_1.currentText())],self.currentFolder, tt, filename)
os.system(action)
return
def calculate2(self):
tt = ''
if self.time_2.currentText()=='Latest Time':
tt = '-latestTime'
filename = '%s/walls_calculation.log'%self.currentFolder
if self.field_2.currentText()=='y Plus RAS':
if not os.path.isfile('%s/constant/RASProperties'%self.currentFolder):
QtGui.QMessageBox(QtGui.QMessageBox.Information, "Caution", "Action can not be done!").exec_()
return
if self.field_2.currentText()=='y Plus LES':
if not os.path.isfile('%s/constant/LESProperties'%self.currentFolder):
QtGui.QMessageBox(QtGui.QMessageBox.Information, "Caution", "Action can not be done!").exec_()
return
self.window().newLogTab('Postpro Wall',filename)
if self.nproc<=1:
action = '%s -case %s %s > %s'%(apps[str(self.field_2.currentText())],self.currentFolder, tt, filename)
else:
action = 'mpirun -np %s %s -case %s %s -parallel > %s'%(str(self.nproc),apps[str(self.field_2.currentText())],self.currentFolder, tt, filename)
os.system(action)
return
def calculate3(self):
tt = ''
if self.time_3.currentText()=='Latest Time':
tt = '-latestTime'
filename = '%s/patch_calculation.log'%self.currentFolder
self.window().newLogTab('Postpro Patch',filename)
fieldName = str(self.field_3.currentText())
patchName = str(self.bou_3.currentText())
if self.nproc<=1:
action = '%s -case %s %s %s %s > %s &' %(apps[str(self.type_3.currentText())],self.currentFolder,tt,fieldName,patchName,filename)
else:
action = 'mpirun -np %s %s -case %s %s %s %s -parallel > %s &' %(str(self.nproc), apps[str(self.type_3.currentText())],self.currentFolder,tt,fieldName,patchName,filename)
os.system(action)
return
|
jmarcelogimenez/petroSym
|
petroSym/postpro.py
|
Python
|
gpl-2.0
| 6,201
|
[
"VTK"
] |
8fa22c48b8beaf6c07d6287efecef4e0dc759841af8c83a3ae68c5508e4ce81a
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the standard renderer, render window
# and interactor
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.SetDesiredUpdateRate(3)
# Create a small mesh. The coarser and more opaque the mesh, the easier it
# is to see rendering errors.
input = vtk.vtkImageMandelbrotSource()
input.SetWholeExtent(0,2,0,2,0,2)
input.SetSizeCX(2,2,2,2)
input.SetMaximumNumberOfIterations(10)
# make sure we have only tetrahedra
trifilter = vtk.vtkDataSetTriangleFilter()
trifilter.SetInputConnection(input.GetOutputPort())
# Create transfer mapping scalar value to opacity
opacityTransferFunction = vtk.vtkPiecewiseFunction()
opacityTransferFunction.AddPoint(0,0.0)
opacityTransferFunction.AddPoint(10,1.0)
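# Illustrative note (not in the original test): with these two points the
# piecewise function ramps opacity linearly from fully transparent at scalar
# value 0 to fully opaque at 10, matching the Mandelbrot source's 10-iteration
# maximum set above.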
# Create transfer mapping scalar value to color
colorTransferFunction = vtk.vtkColorTransferFunction()
colorTransferFunction.AddRGBPoint(0,1.0,0.0,1.0)
colorTransferFunction.AddRGBPoint(2,0.0,0.0,1.0)
colorTransferFunction.AddRGBPoint(4,0.0,1.0,1.0)
colorTransferFunction.AddRGBPoint(6,0.0,1.0,0.0)
colorTransferFunction.AddRGBPoint(8,1.0,1.0,0.0)
colorTransferFunction.AddRGBPoint(10,1.0,0.0,0.0)
# The property describes how the data will look
volumeProperty = vtk.vtkVolumeProperty()
volumeProperty.SetColor(colorTransferFunction)
volumeProperty.SetScalarOpacity(opacityTransferFunction)
volumeProperty.ShadeOff()
volumeProperty.SetInterpolationTypeToLinear()
volumeProperty.SetScalarOpacityUnitDistance(0.75)
# The mapper / ray cast function / ray integrator know how to render the data
volumeMapper = vtk.vtkUnstructuredGridVolumeZSweepMapper()
volumeMapper.SetInputConnection(trifilter.GetOutputPort())
#vtkUnstructuredGridLinearRayIntegrator rayIntegrator
# volumeMapper SetRayIntegrator rayIntegrator
rayIntegrator = vtk.vtkUnstructuredGridPreIntegration()
volumeMapper.SetRayIntegrator(rayIntegrator)
# The volume holds the mapper and the property and
# can be used to position/orient the volume
volume = vtk.vtkVolume()
volume.SetMapper(volumeMapper)
volume.SetProperty(volumeProperty)
ren1.AddVolume(volume)
renWin.SetSize(300,300)
ren1.ResetCamera()
ren1.GetActiveCamera().Azimuth(20.0)
ren1.GetActiveCamera().Elevation(15.0)
ren1.GetActiveCamera().Zoom(1.5)
renWin.Render()
def TkCheckAbort (__vtk__temp0=0,__vtk__temp1=0):
foo = renWin.GetEventPending()
if (foo != 0):
renWin.SetAbortRender(1)
pass
renWin.AddObserver("AbortCheckEvent",TkCheckAbort)
iren.Initialize()
# --- end of script --
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Rendering/Volume/Testing/Python/TestPTZSweep.py
|
Python
|
bsd-3-clause
| 2,739
|
[
"VTK"
] |
de395fe76b4914c78775d89ad76de5585430045cb582d85a339f1e7493d99d0d
|
"""
Implements a well defined versioning schema.
There are three class types - VersionToken, Version and VersionRange. A Version
is a set of zero or more VersionTokens, separate by '.'s or '-'s (eg "1.2-3").
A VersionToken is a string containing alphanumerics, and default implemenations
'NumericToken' and 'AlphanumericVersionToken' are supplied. You can implement
your own if you want stricter tokens or different sorting behaviour.
A VersionRange is a set of one or more contiguous version ranges - for example,
"3+<5" contains any version >=3 but less than 5. Version ranges can be used to
define dependency requirements between objects. They can be OR'd together, AND'd
and inverted.
The empty version '', and empty version range '', are also handled. The empty
version is used to denote unversioned objects. The empty version range, also
known as the 'any' range, is used to refer to any version of an object.
"""
from rez.vendor.version.util import VersionError, ParseException, _Common, \
total_ordering, dedup
import rez.vendor.pyparsing.pyparsing as pp
from bisect import bisect_left
import copy
import string
import re
re_token = re.compile(r"[a-zA-Z0-9_]+")
@total_ordering
class _Comparable(_Common):
def __lt__(self, other):
raise NotImplementedError
@total_ordering
class _ReversedComparable(_Common):
def __init__(self, value):
self.value = value
def __lt__(self, other):
return not (self.value < other.value)
def __str__(self):
return "reverse(%s)" % str(self.value)
def __repr__(self):
return "reverse(%r)" % self.value
class VersionToken(_Comparable):
"""Token within a version number.
A version token is that part of a version number that appears between a
delimiter, typically '.' or '-'. For example, the version number '2.3.07b'
contains the tokens '2', '3' and '07b' respectively.
Version tokens are only allowed to contain alphanumerics (any case) and
underscores.
"""
def __init__(self, token):
"""Create a VersionToken.
Args:
token: Token string, eg "rc02"
"""
raise NotImplementedError
@classmethod
def create_random_token_string(cls):
"""Create a random token string. For testing purposes only."""
raise NotImplementedError
def less_than(self, other):
"""Compare to another VersionToken.
Args:
other: The VersionToken object to compare against.
Returns:
True if this token is less than other, False otherwise.
"""
raise NotImplementedError
def next(self):
"""Returns the next largest token."""
raise NotImplementedError
def __str__(self):
raise NotImplementedError
def __lt__(self, other):
return self.less_than(other)
def __eq__(self, other):
return (not self < other) and (not other < self)
class NumericToken(VersionToken):
"""Numeric version token.
Version token supporting numbers only. Padding is ignored.
"""
def __init__(self, token):
if not token.isdigit():
raise VersionError("Invalid version token: '%s'" % token)
else:
self.n = int(token)
@classmethod
def create_random_token_string(cls):
import random
chars = string.digits
return ''.join([chars[random.randint(0, len(chars) - 1)]
for _ in range(8)])
def __str__(self):
return str(self.n)
def __eq__(self, other):
return (self.n == other.n)
def less_than(self, other):
return (self.n < other.n)
def next(self):
other = copy.copy(self)
        other.n = self.n + 1
return other
class _SubToken(_Comparable):
"""Used internally by AlphanumericVersionToken."""
def __init__(self, s):
self.s = s
self.n = int(s) if s.isdigit() else None
def __lt__(self, other):
if self.n is None:
return (self.s < other.s) if other.n is None else True
else:
return False if other.n is None \
else ((self.n, self.s) < (other.n, other.s))
def __eq__(self, other):
return (self.s == other.s) and (self.n == other.n)
def __str__(self):
return self.s
class AlphanumericVersionToken(VersionToken):
"""Alphanumeric version token.
These tokens compare as follows:
- each token is split into alpha and numeric groups (subtokens);
- the resulting subtoken list is compared.
- alpha comparison is case-sensitive, numeric comparison is padding-sensitive.
Subtokens compare as follows:
- alphas come before numbers;
- alphas are compared alphabetically (_, then A-Z, then a-z);
- numbers are compared numerically. If numbers are equivalent but zero-
padded differently, they are then compared alphabetically. Thus "01" < "1".
Some example comparisons that equate to true:
- "3" < "4"
- "01" < "1"
- "beta" < "1"
- "alpha3" < "alpha4"
- "alpha" < "alpha3"
- "gamma33" < "33gamma"
"""
numeric_regex = re.compile("[0-9]+")
regex = re.compile(r"[a-zA-Z0-9_]+\Z")
def __init__(self, token):
if token is None:
self.subtokens = None
elif not self.regex.match(token):
raise VersionError("Invalid version token: '%s'" % token)
else:
self.subtokens = self._parse(token)
@classmethod
def create_random_token_string(cls):
import random
chars = string.digits + string.ascii_letters
return ''.join([chars[random.randint(0, len(chars) - 1)]
for _ in range(8)])
def __str__(self):
return ''.join(map(str, self.subtokens))
def __eq__(self, other):
return (self.subtokens == other.subtokens)
def less_than(self, other):
return (self.subtokens < other.subtokens)
def next(self):
other = AlphanumericVersionToken(None)
other.subtokens = self.subtokens[:]
subtok = other.subtokens[-1]
if subtok.n is None:
other.subtokens[-1] = _SubToken(subtok.s + '_')
else:
other.subtokens.append(_SubToken('_'))
return other
@classmethod
def _parse(cls, s):
subtokens = []
alphas = cls.numeric_regex.split(s)
numerics = cls.numeric_regex.findall(s)
b = True
while alphas or numerics:
if b:
alpha = alphas[0]
alphas = alphas[1:]
if alpha:
subtokens.append(_SubToken(alpha))
else:
numeric = numerics[0]
numerics = numerics[1:]
subtokens.append(_SubToken(numeric))
b = not b
return subtokens
def reverse_sort_key(comparable):
"""Key that gives reverse sort order on versions and version ranges.
Example:
>>> Version("1.0") < Version("2.0")
True
>>> reverse_sort_key(Version("1.0")) < reverse_sort_key(Version("2.0"))
False
Args:
        comparable (`Version` or `VersionRange`): Object to wrap.
Returns:
`_ReversedComparable`: Wrapper object that reverses comparisons.
"""
return _ReversedComparable(comparable)
class Version(_Comparable):
"""Version object.
A Version is a sequence of zero or more version tokens, separated by either
a dot '.' or hyphen '-' delimiters. Note that separators only affect Version
objects cosmetically - in other words, the version '1.0.0' is equivalent to
'1-0-0'.
The empty version '' is the smallest possible version, and can be used to
represent an unversioned resource.
"""
inf = None
def __init__(self, ver_str='', make_token=AlphanumericVersionToken):
"""Create a Version object.
Args:
ver_str: Version string.
make_token: Callable that creates a VersionToken subclass from a
string.
"""
self.tokens = []
self.seps = []
self._str = None
self._hash = None
if ver_str:
toks = re_token.findall(ver_str)
if not toks:
raise VersionError(ver_str)
seps = re_token.split(ver_str)
if seps[0] or seps[-1] or max(len(x) for x in seps) > 1:
raise VersionError("Invalid version syntax: '%s'" % ver_str)
for tok in toks:
try:
self.tokens.append(make_token(tok))
except VersionError as e:
raise VersionError("Invalid version '%s': %s"
% (ver_str, str(e)))
self.seps = seps[1:-1]
def copy(self):
"""Returns a copy of the version."""
other = Version(None)
other.tokens = self.tokens[:]
other.seps = self.seps[:]
return other
def trim(self, len_):
"""Return a copy of the version, possibly with less tokens.
Args:
len_ (int): New version length. If >= current length, an
unchanged copy of the version is returned.
"""
other = Version(None)
other.tokens = self.tokens[:len_]
other.seps = self.seps[:len_ - 1]
return other
def next(self):
"""Return 'next' version. Eg, next(1.2) is 1.2_"""
if self.tokens:
other = self.copy()
tok = other.tokens.pop()
other.tokens.append(tok.next())
return other
else:
return Version.inf
@property
def major(self):
"""Semantic versioning major version."""
return self[0]
@property
def minor(self):
"""Semantic versioning minor version."""
return self[1]
@property
def patch(self):
"""Semantic versioning patch version."""
return self[2]
def as_tuple(self):
"""Convert to a tuple of strings.
Example:
>>> print Version("1.2.12").as_tuple()
('1', '2', '12')
"""
return tuple(map(str, self.tokens))
def __len__(self):
return len(self.tokens or [])
def __getitem__(self, index):
try:
return (self.tokens or [])[index]
except IndexError:
raise IndexError("version token index out of range")
def __nonzero__(self):
"""The empty version equates to False."""
return bool(self.tokens)
def __eq__(self, other):
return isinstance(other, Version) and self.tokens == other.tokens
def __lt__(self, other):
if self.tokens is None:
return False
elif other.tokens is None:
return True
else:
return (self.tokens < other.tokens)
def __hash__(self):
if self._hash is None:
self._hash = hash(None) if self.tokens is None \
else hash(tuple(map(str, self.tokens)))
return self._hash
def __str__(self):
if self._str is None:
self._str = "[INF]" if self.tokens is None \
else ''.join(str(x) + y for x, y in zip(self.tokens, self.seps + ['']))
return self._str
# internal use only
Version.inf = Version()
Version.inf.tokens = None
class _LowerBound(_Comparable):
min = None
def __init__(self, version, inclusive):
self.version = version
self.inclusive = inclusive
def __str__(self):
if self.version:
s = "%s+" if self.inclusive else ">%s"
return s % self.version
else:
return '' if self.inclusive else ">"
def __eq__(self, other):
return (self.version == other.version) \
and (self.inclusive == other.inclusive)
def __lt__(self, other):
return (self.version < other.version) \
or ((self.version == other.version)
and (self.inclusive and not other.inclusive))
def __hash__(self):
return hash((self.version, self.inclusive))
def contains_version(self, version):
return (version > self.version) \
or (self.inclusive and (version == self.version))
_LowerBound.min = _LowerBound(Version(), True)
class _UpperBound(_Comparable):
inf = None
def __init__(self, version, inclusive):
self.version = version
self.inclusive = inclusive
if not version and not inclusive:
raise VersionError("Invalid upper bound: '%s'" % str(self))
def __str__(self):
s = "<=%s" if self.inclusive else "<%s"
return s % self.version
def __eq__(self, other):
return (self.version == other.version) \
and (self.inclusive == other.inclusive)
def __lt__(self, other):
return (self.version < other.version) \
or ((self.version == other.version)
and (not self.inclusive and other.inclusive))
def __hash__(self):
return hash((self.version, self.inclusive))
def contains_version(self, version):
return (version < self.version) \
or (self.inclusive and (version == self.version))
_UpperBound.inf = _UpperBound(Version.inf, True)
class _Bound(_Comparable):
any = None
def __init__(self, lower=None, upper=None, invalid_bound_error=True):
self.lower = lower or _LowerBound.min
self.upper = upper or _UpperBound.inf
if (invalid_bound_error and
(self.lower.version > self.upper.version
or ((self.lower.version == self.upper.version)
and not (self.lower.inclusive and self.upper.inclusive)))):
raise VersionError("Invalid bound")
def __str__(self):
if self.upper.version == Version.inf:
return str(self.lower)
elif self.lower.version == self.upper.version:
return "==%s" % str(self.lower.version)
elif self.lower.inclusive and self.upper.inclusive:
if self.lower.version:
return "%s..%s" % (self.lower.version, self.upper.version)
else:
return "<=%s" % self.upper.version
elif (self.lower.inclusive and not self.upper.inclusive) \
and (self.lower.version.next() == self.upper.version):
return str(self.lower.version)
else:
return "%s%s" % (self.lower, self.upper)
def __eq__(self, other):
return (self.lower == other.lower) and (self.upper == other.upper)
def __lt__(self, other):
return (self.lower, self.upper) < (other.lower, other.upper)
def __hash__(self):
return hash((self.lower, self.upper))
def lower_bounded(self):
return (self.lower != _LowerBound.min)
def upper_bounded(self):
return (self.upper != _UpperBound.inf)
def contains_version(self, version):
return (self.version_containment(version) == 0)
def version_containment(self, version):
if not self.lower.contains_version(version):
return -1
if not self.upper.contains_version(version):
return 1
return 0
def contains_bound(self, bound):
return (self.lower <= bound.lower) and (self.upper >= bound.upper)
def intersects(self, other):
lower = max(self.lower, other.lower)
upper = min(self.upper, other.upper)
return (lower.version < upper.version) or \
((lower.version == upper.version) and
(lower.inclusive and upper.inclusive))
def intersection(self, other):
lower = max(self.lower, other.lower)
upper = min(self.upper, other.upper)
if (lower.version < upper.version) or \
((lower.version == upper.version) and
(lower.inclusive and upper.inclusive)):
return _Bound(lower, upper)
else:
return None
_Bound.any = _Bound()
class _VersionRangeParser(object):
debug = False # set to True to enable parser debugging
re_flags = (re.VERBOSE | re.DEBUG) if debug else re.VERBOSE
# The regular expression for a version - one or more version tokens
# followed by a non-capturing group of version separator followed by
# one or more version tokens.
version_group = r"([0-9a-zA-Z_]+(?:[.-][0-9a-zA-Z_]+)*)" # A Version Number
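    # Illustrative only: this group matches dotted/dashed alphanumeric versions
    # such as "1.2.3", "2.0-beta1" or "1_0"; the alternations below anchor it
    # inside exact (==), inclusive (..), lower-/upper-bounded and ranged forms.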
version_range_regex = \
(r" ^(?P<version>{version_group})$"
"|" # Or match an exact version number (e.g. ==1.0.0)
" ^(?P<exact_version>"
" ==" # Required == operator
" (?P<exact_version_group>{version_group})?"
" )$"
"|" # Or match an inclusive bound (e.g. 1.0.0..2.0.0)
" ^(?P<inclusive_bound>"
" (?P<inclusive_lower_version>{version_group})?"
" \.\." # Required .. operator
" (?P<inclusive_upper_version>{version_group})?"
" )$"
"|" # Or match a lower bound (e.g. 1.0.0+)
" ^(?P<lower_bound>"
" (?P<lower_bound_prefix>>|>=)?" # Bound is exclusive?
" (?P<lower_version>{version_group})?"
" (?(lower_bound_prefix)|\+)" # + only if bound is not exclusive
" )$"
"|" # Or match an upper bound (e.g. <=1.0.0)
" ^(?P<upper_bound>"
" (?P<upper_bound_prefix><(?={version_group})|<=)?" # Bound is exclusive?
" (?P<upper_version>{version_group})?"
" )$"
"|" # Or match a range (e.g. 1.0.0+<2.0.0)
" ^(?P<range>"
" (?P<range_lower>"
" (?P<range_lower_prefix>>|>=)?" # Lower bound is exclusive?
" (?P<range_lower_version>{version_group})?"
" (?(range_lower_prefix)|\+)?" # + only if lower bound is not exclusive
" )(?P<range_upper>"
" (?(range_lower_version),?|)" # , only if lower bound is found
" (?P<range_upper_prefix><(?={version_group})|<=)" # <= only if followed by a version group
" (?P<range_upper_version>{version_group})?"
" )"
" )$").format(version_group=version_group)
regex = re.compile(version_range_regex, re_flags)
def __init__(self, input_string, make_token, invalid_bound_error=True):
self.make_token = make_token
self._groups = {}
self._input_string = input_string
self.bounds = []
self.invalid_bound_error = invalid_bound_error
for part in input_string.split("|"):
if part == '':
version = self._create_version_from_token(part)
self.bounds.append(_Bound(version, None))
continue
match = re.search(self.regex, part)
if not match:
raise ParseException("Syntax error in version range '%s'" % part)
self._groups = match.groupdict()
if self._groups['version']:
self._act_version()
if self._groups['exact_version']:
self._act_exact_version()
if self._groups['inclusive_bound']:
self._act_bound()
if self._groups['lower_bound']:
self._act_lower_bound()
if self._groups['upper_bound']:
self._act_upper_bound()
if self._groups['range']:
self._act_lower_and_upper_bound()
def _is_lower_bound_exclusive(self, token):
return True if token == ">" else False
def _is_upper_bound_exclusive(self, token):
return True if token == "<" else False
def _create_version_from_token(self, token):
return Version(token, make_token=self.make_token)
def action(fn):
def fn_(self):
result = fn(self)
if self.debug:
label = fn.__name__.replace("_act_", "")
print "%-21s: %s" % (label, self._input_string)
for key, value in self._groups.items():
print " %-17s= %s" % (key, value)
print " %-17s= %s" % ("bounds", self.bounds)
return result
return fn_
@action
def _act_version(self):
version = self._create_version_from_token(self._groups['version'])
lower_bound = _LowerBound(version, True)
upper_bound = _UpperBound(version.next(), False) if version else None
self.bounds.append(_Bound(lower_bound, upper_bound))
@action
def _act_exact_version(self):
version = self._create_version_from_token(self._groups['exact_version_group'])
lower_bound = _LowerBound(version, True)
upper_bound = _UpperBound(version, True)
self.bounds.append(_Bound(lower_bound, upper_bound))
@action
def _act_bound(self):
lower_version = self._create_version_from_token(self._groups['inclusive_lower_version'])
lower_bound = _LowerBound(lower_version, True)
upper_version = self._create_version_from_token(self._groups['inclusive_upper_version'])
upper_bound = _UpperBound(upper_version, True)
self.bounds.append(_Bound(lower_bound, upper_bound, self.invalid_bound_error))
@action
def _act_lower_bound(self):
version = self._create_version_from_token(self._groups['lower_version'])
exclusive = self._is_lower_bound_exclusive(self._groups['lower_bound_prefix'])
lower_bound = _LowerBound(version, not exclusive)
self.bounds.append(_Bound(lower_bound, None))
@action
def _act_upper_bound(self):
version = self._create_version_from_token(self._groups['upper_version'])
exclusive = self._is_upper_bound_exclusive(self._groups['upper_bound_prefix'])
upper_bound = _UpperBound(version, not exclusive)
self.bounds.append(_Bound(None, upper_bound))
@action
def _act_lower_and_upper_bound(self):
lower_bound = None
upper_bound = None
if self._groups['range_lower']:
version = self._create_version_from_token(self._groups['range_lower_version'])
exclusive = self._is_lower_bound_exclusive(self._groups['range_lower_prefix'])
lower_bound = _LowerBound(version, not exclusive)
if self._groups['range_upper']:
version = self._create_version_from_token(self._groups['range_upper_version'])
exclusive = self._is_upper_bound_exclusive(self._groups['range_upper_prefix'])
upper_bound = _UpperBound(version, not exclusive)
self.bounds.append(_Bound(lower_bound, upper_bound, self.invalid_bound_error))
class VersionRange(_Comparable):
"""Version range.
A version range is a set of one or more contiguous ranges of versions. For
example, "3.0 or greater, but less than 4" is a contiguous range that contains
versions such as "3.0", "3.1.0", "3.99" etc. Version ranges behave something
like sets - they can be intersected, added and subtracted, but can also be
inverted. You can test to see if a Version is contained within a VersionRange.
A VersionRange "3" (for example) is the superset of any version "3[.X.X...]".
The version "3" itself is also within this range, and is smaller than "3.0"
- any version with common leading tokens, but with a larger token count, is
the larger version of the two.
VersionRange objects have a flexible syntax that let you describe any
combination of contiguous ranges, including inclusive and exclusive upper
and lower bounds. This is best explained by example (those listed on the
same line are equivalent):
"3": 'superset' syntax, contains "3", "3.0", "3.1.4" etc;
"2+", ">=2": inclusive lower bound syntax, contains "2", "2.1", "5.0.0" etc;
">2": exclusive lower bound;
"<5": exclusive upper bound;
"<=5": inclusive upper bound;
"1+<5", ">=1<5": inclusive lower, exclusive upper. The most common form of
a 'bounded' version range (ie, one with a lower and upper bound);
">1<5": exclusive lower, exclusive upper;
">1<=5": exclusive lower, inclusive upper;
"1+<=5", "1..5": inclusive lower, inclusive upper;
"==2": a range that contains only the single version "2".
    To describe more than one contiguous range, separate ranges with the or '|'
symbol. For example, the version range "4|6+" contains versions such as "4",
"4.0", "4.3.1", "6", "6.1", "10.0.0", but does not contain any version
"5[.X.X...X]". If you provide multiple ranges that overlap, they will be
automatically optimised - for example, the version range "3+<6|4+<8"
becomes "3+<8".
Note that the empty string version range represents the superset of all
possible versions - this is called the "any" range. The empty version can
also be used as an upper or lower bound, leading to some odd but perfectly
valid version range syntax. For example, ">" is a valid range - read like
">''", it means "any version greater than the empty version".
To help with readability, bounded ranges can also have their bounds separated
with a comma, eg ">=2,<=6". The comma is purely cosmetic and is dropped in
the string representation.
"""
def __init__(self, range_str='', make_token=AlphanumericVersionToken,
invalid_bound_error=True):
"""Create a VersionRange object.
Args:
range_str: Range string, such as "3", "3+<4.5", "2|6+". The range
will be optimised, so the string representation of this instance
may not match range_str. For example, "3+<6|4+<8" == "3+<8".
make_token: Version token class to use.
invalid_bound_error (bool): If True, raise an exception if an
impossible range is given, such as '3+<2'.
"""
self._str = None
self.bounds = []
if range_str is None:
return
try:
parser = _VersionRangeParser(range_str, make_token,
invalid_bound_error=invalid_bound_error)
bounds = parser.bounds
except ParseException as e:
raise VersionError("Syntax error in version range '%s': %s"
% (range_str, str(e)))
except VersionError as e:
raise VersionError("Invalid version range '%s': %s"
% (range_str, str(e)))
if bounds:
self.bounds = self._union(bounds)
else:
self.bounds.append(_Bound.any)
def is_any(self):
"""Returns True if this is the "any" range, ie the empty string range
that contains all versions."""
return (len(self.bounds) == 1) and (self.bounds[0] == _Bound.any)
def lower_bounded(self):
"""Returns True if the range has a lower bound (that is not the empty
version)."""
return self.bounds[0].lower_bounded()
def upper_bounded(self):
"""Returns True if the range has an upper bound."""
return self.bounds[-1].upper_bounded()
def bounded(self):
"""Returns True if the range has a lower and upper bound."""
return (self.lower_bounded() and self.upper_bounded())
def issuperset(self, range):
"""Returns True if the VersionRange is contained within this range.
"""
return self._issuperset(self.bounds, range.bounds)
def issubset(self, range):
"""Returns True if we are contained within the version range.
"""
return range.issuperset(self)
def union(self, other):
"""OR together version ranges.
Calculates the union of this range with one or more other ranges.
Args:
other: VersionRange object (or list of) to OR with.
Returns:
New VersionRange object representing the union.
"""
if not hasattr(other, "__iter__"):
other = [other]
bounds = self.bounds[:]
for range in other:
bounds += range.bounds
bounds = self._union(bounds)
range = VersionRange(None)
range.bounds = bounds
return range
def intersection(self, other):
"""AND together version ranges.
Calculates the intersection of this range with one or more other ranges.
Args:
other: VersionRange object (or list of) to AND with.
Returns:
New VersionRange object representing the intersection, or None if
no ranges intersect.
"""
if not hasattr(other, "__iter__"):
other = [other]
bounds = self.bounds
for range in other:
bounds = self._intersection(bounds, range.bounds)
if not bounds:
return None
range = VersionRange(None)
range.bounds = bounds
return range
def inverse(self):
"""Calculate the inverse of the range.
Returns:
New VersionRange object representing the inverse of this range, or
None if there is no inverse (ie, this range is the any range).
"""
if self.is_any():
return None
else:
bounds = self._inverse(self.bounds)
range = VersionRange(None)
range.bounds = bounds
return range
def intersects(self, other):
"""Determine if we intersect with another range.
Args:
other: VersionRange object.
Returns:
True if the ranges intersect, False otherwise.
"""
return self._intersects(self.bounds, other.bounds)
def split(self):
"""Split into separate contiguous ranges.
Returns:
A list of VersionRange objects. For example, the range "3|5+" will
be split into ["3", "5+"].
"""
ranges = []
for bound in self.bounds:
range = VersionRange(None)
range.bounds = [bound]
ranges.append(range)
return ranges
@classmethod
def as_span(cls, lower_version=None, upper_version=None,
lower_inclusive=True, upper_inclusive=True):
"""Create a range from lower_version..upper_version.
Args:
lower_version: Version object representing lower bound of the range.
upper_version: Version object representing upper bound of the range.
Returns:
`VersionRange` object.
"""
lower = (None if lower_version is None
else _LowerBound(lower_version, lower_inclusive))
upper = (None if upper_version is None
else _UpperBound(upper_version, upper_inclusive))
bound = _Bound(lower, upper)
range = cls(None)
range.bounds = [bound]
return range
@classmethod
def from_version(cls, version, op=None):
"""Create a range from a version.
Args:
version: Version object. This is used as the upper/lower bound of
the range.
            op: Operation as a string. One of 'gt'/'>', 'gte'/'>=', 'lt'/'<',
'lte'/'<=', 'eq'/'=='. If None, a bounded range will be created
that contains the version superset.
Returns:
`VersionRange` object.
"""
lower = None
upper = None
if op is None:
lower = _LowerBound(version, True)
upper = _UpperBound(version.next(), False)
elif op in ("eq", "=="):
lower = _LowerBound(version, True)
upper = _UpperBound(version, True)
elif op in ("gt", ">"):
lower = _LowerBound(version, False)
elif op in ("gte", ">="):
lower = _LowerBound(version, True)
elif op in ("lt", "<"):
upper = _UpperBound(version, False)
elif op in ("lte", "<="):
upper = _UpperBound(version, True)
else:
raise VersionError("Unknown bound operation '%s'" % op)
bound = _Bound(lower, upper)
range = cls(None)
range.bounds = [bound]
return range
@classmethod
def from_versions(cls, versions):
"""Create a range from a list of versions.
This method creates a range that contains only the given versions and
no other. Typically the range looks like (for eg) "==3|==4|==5.1".
Args:
versions: List of Version objects.
Returns:
`VersionRange` object.
"""
range = cls(None)
range.bounds = []
for version in dedup(sorted(versions)):
lower = _LowerBound(version, True)
upper = _UpperBound(version, True)
bound = _Bound(lower, upper)
range.bounds.append(bound)
return range
def to_versions(self):
"""Returns exact version ranges as Version objects, or None if there
are no exact version ranges present.
"""
versions = []
for bound in self.bounds:
if bound.lower.inclusive and bound.upper.inclusive \
and (bound.lower.version == bound.upper.version):
versions.append(bound.lower.version)
return versions or None
def contains_version(self, version):
"""Returns True if version is contained in this range."""
if len(self.bounds) < 5:
# not worth overhead of binary search
for bound in self.bounds:
if bound.contains_version(version):
return True
else:
_, contains = self._contains_version(version)
return contains
return False
def iter_intersect_test(self, iterable, key=None, descending=False):
"""Performs containment tests on a sorted list of versions.
This is more optimal than performing separate containment tests on a
list of sorted versions.
Args:
iterable: An ordered sequence of versioned objects. If the list
is not sorted by version, behaviour is undefined.
key (callable): Function that returns a `Version` given an object
from `iterable`. If None, the identity function is used.
descending (bool): Set to True if `iterable` is in descending
version order.
Returns:
An iterator that returns (bool, object) tuples, where 'object' is
the original object in `iterable`, and the bool indicates whether
that version is contained in this range.
"""
return _ContainsVersionIterator(self, iterable, key, descending)
def iter_intersecting(self, iterable, key=None, descending=False):
"""Like `iter_intersect_test`, but returns intersections only.
Returns:
An iterator that returns items from `iterable` that intersect.
"""
return _ContainsVersionIterator(self, iterable, key, descending,
mode=_ContainsVersionIterator.MODE_INTERSECTING)
def iter_non_intersecting(self, iterable, key=None, descending=False):
"""Like `iter_intersect_test`, but returns non-intersections only.
Returns:
An iterator that returns items from `iterable` that don't intersect.
"""
return _ContainsVersionIterator(self, iterable, key, descending,
mode=_ContainsVersionIterator.MODE_NON_INTERSECTING)
def span(self):
"""Return a contiguous range that is a superset of this range.
Returns:
A VersionRange object representing the span of this range. For
example, the span of "2+<4|6+<8" would be "2+<8".
"""
other = VersionRange(None)
bound = _Bound(self.bounds[0].lower, self.bounds[-1].upper)
other.bounds = [bound]
return other
# TODO have this return a new VersionRange instead - this currently breaks
# VersionRange immutability, and could invalidate __str__.
def visit_versions(self, func):
"""Visit each version in the range, and apply a function to each.
This is for advanced usage only.
If `func` returns a `Version`, this call will change the versions in
place.
It is possible to change versions in a way that is nonsensical - for
example setting an upper bound to a smaller version than the lower bound.
Use at your own risk.
Args:
func (callable): Takes a `Version` instance arg, and is applied to
every version in the range. If `func` returns a `Version`, it
will replace the existing version, updating this `VersionRange`
instance in place.
"""
for bound in self.bounds:
if bound.lower is not _LowerBound.min:
result = func(bound.lower.version)
if isinstance(result, Version):
bound.lower.version = result
if bound.upper is not _UpperBound.inf:
result = func(bound.upper.version)
if isinstance(result, Version):
bound.upper.version = result
def __contains__(self, version_or_range):
if isinstance(version_or_range, Version):
return self.contains_version(version_or_range)
else:
return self.issuperset(version_or_range)
def __len__(self):
return len(self.bounds)
def __invert__(self):
return self.inverse()
def __and__(self, other):
return self.intersection(other)
def __or__(self, other):
return self.union(other)
def __add__(self, other):
return self.union(other)
def __sub__(self, other):
inv = other.inverse()
return None if inv is None else self.intersection(inv)
def __str__(self):
if self._str is None:
self._str = '|'.join(map(str, self.bounds))
return self._str
def __eq__(self, other):
return isinstance(other, VersionRange) and self.bounds == other.bounds
def __lt__(self, other):
return (self.bounds < other.bounds)
def __hash__(self):
return hash(tuple(self.bounds))
def _contains_version(self, version):
vbound = _Bound(_LowerBound(version, True))
i = bisect_left(self.bounds, vbound)
if i and self.bounds[i - 1].contains_version(version):
return i - 1, True
if (i < len(self.bounds)) and self.bounds[i].contains_version(version):
return i, True
return i, False
@classmethod
def _union(cls, bounds):
if len(bounds) < 2:
return bounds
bounds_ = list(sorted(bounds))
new_bounds = []
prev_bound = None
upper = None
start = 0
for i, bound in enumerate(bounds_):
if i and ((bound.lower.version > upper.version)
or ((bound.lower.version == upper.version)
and (not bound.lower.inclusive)
and (not prev_bound.upper.inclusive))):
new_bound = _Bound(bounds_[start].lower, upper)
new_bounds.append(new_bound)
start = i
prev_bound = bound
upper = bound.upper if upper is None else max(upper, bound.upper)
new_bound = _Bound(bounds_[start].lower, upper)
new_bounds.append(new_bound)
return new_bounds
@classmethod
def _intersection(cls, bounds1, bounds2):
new_bounds = []
for bound1 in bounds1:
for bound2 in bounds2:
b = bound1.intersection(bound2)
if b:
new_bounds.append(b)
return new_bounds
@classmethod
def _inverse(cls, bounds):
lbounds = [None]
ubounds = []
for bound in bounds:
if not bound.lower.version and bound.lower.inclusive:
ubounds.append(None)
else:
b = _UpperBound(bound.lower.version, not bound.lower.inclusive)
ubounds.append(b)
if bound.upper.version == Version.inf:
lbounds.append(None)
else:
b = _LowerBound(bound.upper.version, not bound.upper.inclusive)
lbounds.append(b)
ubounds.append(None)
new_bounds = []
for lower, upper in zip(lbounds, ubounds):
if not (lower is None and upper is None):
new_bounds.append(_Bound(lower, upper))
return new_bounds
@classmethod
def _issuperset(cls, bounds1, bounds2):
lo = 0
for bound2 in bounds2:
i = bisect_left(bounds1, bound2, lo=lo)
if i and bounds1[i - 1].contains_bound(bound2):
lo = i - 1
continue
if (i < len(bounds1)) and bounds1[i].contains_bound(bound2):
lo = i
continue
return False
return True
@classmethod
def _intersects(cls, bounds1, bounds2):
bounds1, bounds2 = sorted((bounds1, bounds2), key=lambda x: len(x))
if len(bounds2) < 5:
# not worth overhead of binary search
for bound1 in bounds1:
for bound2 in bounds2:
if bound1.intersects(bound2):
return True
return False
lo = 0
for bound1 in bounds1:
i = bisect_left(bounds2, bound1, lo=lo)
if i and bounds2[i - 1].intersects(bound1):
return True
if (i < len(bounds2)) and bounds2[i].intersects(bound1):
return True
lo = max(i - 1, 0)
return False
class _ContainsVersionIterator(object):
MODE_INTERSECTING = 0
MODE_NON_INTERSECTING = 2
MODE_ALL = 3
def __init__(self, range_, iterable, key=None, descending=False, mode=MODE_ALL):
self.mode = mode
self.range_ = range_
self.index = None
self.nbounds = len(self.range_.bounds)
self._constant = True if range_.is_any() else None
self.fn = self._descending if descending else self._ascending
self.it = iter(iterable)
if key is None:
key = lambda x: x
self.keyfunc = key
if mode == self.MODE_ALL:
self.next_fn = self._next
elif mode == self.MODE_INTERSECTING:
self.next_fn = self._next_intersecting
else:
self.next_fn = self._next_non_intersecting
def __iter__(self):
return self
def next(self):
return self.next_fn()
def _next(self):
value = next(self.it)
if self._constant is not None:
return self._constant, value
version = self.keyfunc(value)
intersects = self.fn(version)
return intersects, value
def _next_intersecting(self):
while True:
value = next(self.it)
if self._constant:
return value
elif self._constant is not None:
raise StopIteration
version = self.keyfunc(value)
intersects = self.fn(version)
if intersects:
return value
def _next_non_intersecting(self):
while True:
value = next(self.it)
if self._constant:
raise StopIteration
elif self._constant is not None:
return value
version = self.keyfunc(value)
intersects = self.fn(version)
if not intersects:
return value
@property
def _bound(self):
if self.index < self.nbounds:
return self.range_.bounds[self.index]
else:
return None
def _ascending(self, version):
if self.index is None:
self.index, contains = self.range_._contains_version(version)
bound = self._bound
if contains:
if not bound.upper_bounded():
self._constant = True
return True
elif bound is None: # past end of last bound
self._constant = False
return False
else:
return False # there are more bound(s) ahead
else:
bound = self._bound
j = bound.version_containment(version)
if j == 0:
return True
elif j == -1:
return False
else:
while True:
self.index += 1
bound = self._bound
if bound is None: # past end of last bound
self._constant = False
return False
else:
j = bound.version_containment(version)
if j == 0:
if not bound.upper_bounded():
self._constant = True
return True
elif j == -1:
return False
def _descending(self, version):
if self.index is None:
self.index, contains = self.range_._contains_version(version)
bound = self._bound
if contains:
if not bound.lower_bounded():
self._constant = True
return True
elif bound is None: # past end of last bound
self.index = self.nbounds - 1
return False
elif self.index == 0: # before start of first bound
self._constant = False
return False
else:
self.index -= 1
return False
else:
bound = self._bound
j = bound.version_containment(version)
if j == 0:
return True
elif j == 1:
return False
else:
while self.index:
self.index -= 1
bound = self._bound
j = bound.version_containment(version)
if j == 0:
if not bound.lower_bounded():
self._constant = True
return True
elif j == 1:
return False
self._constant = False # before start of first bound
return False
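# A minimal, illustrative usage sketch of the VersionRange API defined above.
# It only runs when this module is executed directly, and the version strings
# used here are arbitrary examples rather than anything rez-specific.
if __name__ == "__main__":
    r = VersionRange("3+<6|4+<8")      # overlapping ranges are optimised
    print(str(r))                      # -> "3+<8"

    print(Version("4.2.1") in r)       # -> True  (containment test)
    print(Version("8") in r)           # -> False (upper bound is exclusive)

    a = VersionRange("2+<4.5")
    b = VersionRange("3+<6")
    print(str(a | b))                  # union        -> "2+<6"
    print(str(a & b))                  # intersection -> "3+<4.5"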
| ToonBoxEntertainment/rez | src/rez/vendor/version/version.py | Python | lgpl-3.0 | 46,686 | ["VisIt"] | 535d8926d4e07e37a70b21f15126dded2120619fa14aa0671b6c5217f2c308b7 |
#!/usr/bin/python
from brian import *
import pylab
import numpy
import scipy
import scipy.optimize
from Model import *
reference_params = {'gNa': 28, 'C': 21, 'ENa': 50, 'EK':-85, 'EL': -65, 'gK': 11.2, 'gL': 2.8,
'theta_m': -34, 'sigma_m': -5, 'tau_ma': 0.1,
'theta_n': -29, 'sigma_n': -4, 'tau_na': 10
}
image_dir = 'report/images/'
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
def main():
tend=200
cur = 100
tr = HH_Step(reference_params, Step_tstart = 30, Duration = 100, I_amp=cur, tend=tend)
spike_times = detect_spikes(tr['t']/ms, tr['v'][0]/mV, threshold=0)
figure(1)
subplot(211)
plot(tr['v'][0]/mV, tr['tau_m'][0]/ms, label=r'$\tau_m$')
plot(tr['v'][0]/mV, tr['tau_n'][0]/ms, label=r'$\tau_n$')
ylabel("ms")
title(r'Time constant')
legend()
subplot(212)
plot(tr['v'][0]/mV, tr['m_inf'][0], label=r'$m_\infty$')
plot(tr['v'][0]/mV, tr['n_inf'][0], label=r'$n_\infty$')
xlabel("Voltage, mV")
title(r'$X_\infty$')
legend()
savefig(image_dir + "tau_inf.pdf")
close()
figure(2)
subplot(411)
plot(tr['t']/ms, tr['v'][0]/mV)
scatter(spike_times, tr['v'][0][map(lambda x: where(tr['t']/ms == x)[0][0], spike_times)]/mV, label="Spikes")
title('Membrane potential')
xlim(0, tend)
ylabel("Voltage, mV")
legend()
subplot(412)
plot(tr['t']/ms, tr['I'][0]/pamp)
xlim(0, tend)
ylim(0, cur*1.5)
ylabel(r"$I_{ext}$, pA")
title('Injected current')
subplot(413)
plot(tr['t']/ms, tr['m'][0], label="m")
plot(tr['t']/ms, tr['n'][0], label="n")
xlim(0, tend)
title('Traces of gate variables')
legend()
subplot(414)
plot(tr['t']/ms, tr['INa'][0]/namp, label="Na")
plot(tr['t']/ms, tr['IK'][0]/namp, label="K")
xlim(0, tend)
ylabel(r"I, nA")
xlabel("Time, ms")
title('Currents')
legend()
savefig(image_dir + "step_cur.pdf")
close()
if __name__ == '__main__':
main()
| Aydarkhan/bursting_hh | ex1.py | Python | gpl-2.0 | 2,041 | ["Brian"] | b01ca6d0d300756021eabf10680854ef6ca09e954179a921f11d2f6c414b4b77 |
|
import os.path
import warnings
from glob import glob
from io import BytesIO
from numbers import Number
from pathlib import Path
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Callable,
Dict,
Hashable,
Iterable,
Mapping,
Tuple,
Union,
)
import numpy as np
from .. import backends, coding, conventions
from ..core import indexing
from ..core.combine import (
_infer_concat_order_from_positions,
_nested_combine,
auto_combine,
combine_by_coords,
)
from ..core.dataarray import DataArray
from ..core.dataset import Dataset
from ..core.utils import close_on_error, is_grib_path, is_remote_uri
from .common import AbstractDataStore, ArrayWriter
from .locks import _get_scheduler
if TYPE_CHECKING:
try:
from dask.delayed import Delayed
except ImportError:
Delayed = None
DATAARRAY_NAME = "__xarray_dataarray_name__"
DATAARRAY_VARIABLE = "__xarray_dataarray_variable__"
def _get_default_engine_remote_uri():
try:
import netCDF4 # noqa: F401
engine = "netcdf4"
except ImportError: # pragma: no cover
try:
import pydap # noqa: F401
engine = "pydap"
except ImportError:
raise ValueError(
"netCDF4 or pydap is required for accessing "
"remote datasets via OPeNDAP"
)
return engine
def _get_default_engine_grib():
msgs = []
try:
import Nio # noqa: F401
msgs += ["set engine='pynio' to access GRIB files with PyNIO"]
except ImportError: # pragma: no cover
pass
try:
import cfgrib # noqa: F401
msgs += ["set engine='cfgrib' to access GRIB files with cfgrib"]
except ImportError: # pragma: no cover
pass
if msgs:
raise ValueError(" or\n".join(msgs))
else:
raise ValueError("PyNIO or cfgrib is required for accessing " "GRIB files")
def _get_default_engine_gz():
try:
import scipy # noqa: F401
engine = "scipy"
except ImportError: # pragma: no cover
raise ValueError("scipy is required for accessing .gz files")
return engine
def _get_default_engine_netcdf():
try:
import netCDF4 # noqa: F401
engine = "netcdf4"
except ImportError: # pragma: no cover
try:
import scipy.io.netcdf # noqa: F401
engine = "scipy"
except ImportError:
raise ValueError(
"cannot read or write netCDF files without "
"netCDF4-python or scipy installed"
)
return engine
def _get_engine_from_magic_number(filename_or_obj):
# check byte header to determine file type
if isinstance(filename_or_obj, bytes):
magic_number = filename_or_obj[:8]
else:
if filename_or_obj.tell() != 0:
raise ValueError(
"file-like object read/write pointer not at zero "
"please close and reopen, or use a context "
"manager"
)
magic_number = filename_or_obj.read(8)
filename_or_obj.seek(0)
if magic_number.startswith(b"CDF"):
engine = "scipy"
elif magic_number.startswith(b"\211HDF\r\n\032\n"):
engine = "h5netcdf"
if isinstance(filename_or_obj, bytes):
raise ValueError(
"can't open netCDF4/HDF5 as bytes "
"try passing a path or file-like object"
)
else:
if isinstance(filename_or_obj, bytes) and len(filename_or_obj) > 80:
filename_or_obj = filename_or_obj[:80] + b"..."
raise ValueError(
"{} is not a valid netCDF file "
"did you mean to pass a string for a path instead?".format(filename_or_obj)
)
return engine
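# Illustration of the magic-number dispatch above (a sketch, not xarray API
# documentation): classic netCDF files start with b"CDF", while netCDF4/HDF5
# files start with the 8-byte HDF5 signature. For raw bytes input, only the
# classic format can actually be opened:
#
#     _get_engine_from_magic_number(b"CDF\x01" + b"\x00" * 4)   # -> "scipy"
#     _get_engine_from_magic_number(b"\x89HDF\r\n\x1a\n")       # raises ValueError:
#                                                               # HDF5 can't be read from bytes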
def _get_default_engine(path, allow_remote=False):
if allow_remote and is_remote_uri(path):
engine = _get_default_engine_remote_uri()
elif is_grib_path(path):
engine = _get_default_engine_grib()
elif path.endswith(".gz"):
engine = _get_default_engine_gz()
else:
engine = _get_default_engine_netcdf()
return engine
def _normalize_path(path):
if is_remote_uri(path):
return path
else:
return os.path.abspath(os.path.expanduser(path))
def _validate_dataset_names(dataset):
"""DataArray.name and Dataset keys must be a string or None"""
def check_name(name):
if isinstance(name, str):
if not name:
raise ValueError(
"Invalid name for DataArray or Dataset key: "
"string must be length 1 or greater for "
"serialization to netCDF files"
)
elif name is not None:
raise TypeError(
"DataArray.name or Dataset key must be either a "
"string or None for serialization to netCDF files"
)
for k in dataset.variables:
check_name(k)
def _validate_attrs(dataset):
"""`attrs` must have a string key and a value which is either: a number,
a string, an ndarray or a list/tuple of numbers/strings.
"""
def check_attr(name, value):
if isinstance(name, str):
if not name:
raise ValueError(
"Invalid name for attr: string must be "
"length 1 or greater for serialization to "
"netCDF files"
)
else:
raise TypeError(
"Invalid name for attr: {} must be a string for "
"serialization to netCDF files".format(name)
)
if not isinstance(value, (str, Number, np.ndarray, np.number, list, tuple)):
raise TypeError(
"Invalid value for attr: {} must be a number, "
"a string, an ndarray or a list/tuple of "
"numbers/strings for serialization to netCDF "
"files".format(value)
)
# Check attrs on the dataset itself
for k, v in dataset.attrs.items():
check_attr(k, v)
# Check attrs on each variable within the dataset
for variable in dataset.variables.values():
for k, v in variable.attrs.items():
check_attr(k, v)
def _protect_dataset_variables_inplace(dataset, cache):
for name, variable in dataset.variables.items():
if name not in variable.dims:
# no need to protect IndexVariable objects
data = indexing.CopyOnWriteArray(variable._data)
if cache:
data = indexing.MemoryCachedArray(data)
variable.data = data
def _finalize_store(write, store):
""" Finalize this store by explicitly syncing and closing"""
del write # ensure writing is done first
store.close()
def load_dataset(filename_or_obj, **kwargs):
"""Open, load into memory, and close a Dataset from a file or file-like
object.
This is a thin wrapper around :py:meth:`~xarray.open_dataset`. It differs
from `open_dataset` in that it loads the Dataset into memory, closes the
file, and returns the Dataset. In contrast, `open_dataset` keeps the file
handle open and lazy loads its contents. All parameters are passed directly
to `open_dataset`. See that documentation for further details.
Returns
-------
dataset : Dataset
The newly created Dataset.
See Also
--------
open_dataset
"""
if "cache" in kwargs:
raise TypeError("cache has no effect in this context")
with open_dataset(filename_or_obj, **kwargs) as ds:
return ds.load()
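# Minimal usage sketch for load_dataset() vs open_dataset(); "example.nc" is a
# hypothetical file path used purely for illustration:
#
#     ds = load_dataset("example.nc")      # eagerly loaded, file handle closed
#     lazy = open_dataset("example.nc")    # lazily loaded, handle stays open
#     lazy.close()
#
# Note that passing ``cache=...`` to load_dataset raises TypeError, since the
# data is always loaded into memory here.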
def load_dataarray(filename_or_obj, **kwargs):
"""Open, load into memory, and close a DataArray from a file or file-like
object containing a single data variable.
This is a thin wrapper around :py:meth:`~xarray.open_dataarray`. It differs
    from `open_dataarray` in that it loads the DataArray into memory, closes the
    file, and returns the DataArray. In contrast, `open_dataarray` keeps the file
handle open and lazy loads its contents. All parameters are passed directly
to `open_dataarray`. See that documentation for further details.
Returns
-------
    dataarray : DataArray
The newly created DataArray.
See Also
--------
open_dataarray
"""
if "cache" in kwargs:
raise TypeError("cache has no effect in this context")
with open_dataarray(filename_or_obj, **kwargs) as da:
return da.load()
def open_dataset(
filename_or_obj,
group=None,
decode_cf=True,
mask_and_scale=None,
decode_times=True,
autoclose=None,
concat_characters=True,
decode_coords=True,
engine=None,
chunks=None,
lock=None,
cache=None,
drop_variables=None,
backend_kwargs=None,
use_cftime=None,
):
"""Open and decode a dataset from a file or file-like object.
Parameters
----------
filename_or_obj : str, Path, file or xarray.backends.*DataStore
Strings and Path objects are interpreted as a path to a netCDF file
or an OpenDAP URL and opened with python-netCDF4, unless the filename
ends with .gz, in which case the file is gunzipped and opened with
scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
group : str, optional
Path to the netCDF4 group in the given file to open (only works for
netCDF4 files).
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA. mask_and_scale defaults to True except for the
pseudonetcdf backend.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
autoclose : bool, optional
If True, automatically close files to avoid OS Error of too many files
being open. However, this option doesn't work with streams, e.g.,
BytesIO.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
decode_coords : bool, optional
If True, decode the 'coordinates' attribute to identify coordinates in
the resulting dataset.
engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib', \
'pseudonetcdf'}, optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
'netcdf4'.
chunks : int or dict, optional
        If chunks is provided, it is used to load the new dataset into dask
arrays. ``chunks={}`` loads the dataset with dask using a single
chunk for all arrays.
lock : False or duck threading.Lock, optional
Resource lock to use when reading data from disk. Only relevant when
using dask or another form of parallelism. By default, appropriate
locks are chosen to safely read and write files with the currently
active dask scheduler.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False. Does not
change the behavior of coordinates corresponding to dimensions, which
always load their data from disk into a ``pandas.Index``.
drop_variables: string or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
backend_kwargs: dictionary, optional
A dictionary of keyword arguments to pass on to the backend. This
may be useful when backend options would improve performance or
allow user control of dataset processing.
use_cftime: bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error.
Returns
-------
dataset : Dataset
The newly created dataset.
Notes
-----
``open_dataset`` opens the file with read-only access. When you modify
values of a Dataset, even one linked to files on disk, only the in-memory
copy you are manipulating in xarray is modified: the original file on disk
is never touched.
See Also
--------
open_mfdataset
"""
engines = [
None,
"netcdf4",
"scipy",
"pydap",
"h5netcdf",
"pynio",
"cfgrib",
"pseudonetcdf",
]
if engine not in engines:
raise ValueError(
"unrecognized engine for open_dataset: {}\n"
"must be one of: {}".format(engine, engines)
)
if autoclose is not None:
warnings.warn(
"The autoclose argument is no longer used by "
"xarray.open_dataset() and is now ignored; it will be removed in "
"a future version of xarray. If necessary, you can control the "
"maximum number of simultaneous open files with "
"xarray.set_options(file_cache_maxsize=...).",
FutureWarning,
stacklevel=2,
)
if mask_and_scale is None:
mask_and_scale = not engine == "pseudonetcdf"
if not decode_cf:
mask_and_scale = False
decode_times = False
concat_characters = False
decode_coords = False
if cache is None:
cache = chunks is None
if backend_kwargs is None:
backend_kwargs = {}
def maybe_decode_store(store, lock=False):
ds = conventions.decode_cf(
store,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
concat_characters=concat_characters,
decode_coords=decode_coords,
drop_variables=drop_variables,
use_cftime=use_cftime,
)
_protect_dataset_variables_inplace(ds, cache)
if chunks is not None:
from dask.base import tokenize
# if passed an actual file path, augment the token with
# the file modification time
if isinstance(filename_or_obj, str) and not is_remote_uri(filename_or_obj):
mtime = os.path.getmtime(filename_or_obj)
else:
mtime = None
token = tokenize(
filename_or_obj,
mtime,
group,
decode_cf,
mask_and_scale,
decode_times,
concat_characters,
decode_coords,
engine,
chunks,
drop_variables,
use_cftime,
)
name_prefix = "open_dataset-%s" % token
ds2 = ds.chunk(chunks, name_prefix=name_prefix, token=token)
ds2._file_obj = ds._file_obj
else:
ds2 = ds
return ds2
if isinstance(filename_or_obj, Path):
filename_or_obj = str(filename_or_obj)
if isinstance(filename_or_obj, AbstractDataStore):
store = filename_or_obj
elif isinstance(filename_or_obj, str):
filename_or_obj = _normalize_path(filename_or_obj)
if engine is None:
engine = _get_default_engine(filename_or_obj, allow_remote=True)
if engine == "netcdf4":
store = backends.NetCDF4DataStore.open(
filename_or_obj, group=group, lock=lock, **backend_kwargs
)
elif engine == "scipy":
store = backends.ScipyDataStore(filename_or_obj, **backend_kwargs)
elif engine == "pydap":
store = backends.PydapDataStore.open(filename_or_obj, **backend_kwargs)
elif engine == "h5netcdf":
store = backends.H5NetCDFStore.open(
filename_or_obj, group=group, lock=lock, **backend_kwargs
)
elif engine == "pynio":
store = backends.NioDataStore(filename_or_obj, lock=lock, **backend_kwargs)
elif engine == "pseudonetcdf":
store = backends.PseudoNetCDFDataStore.open(
filename_or_obj, lock=lock, **backend_kwargs
)
elif engine == "cfgrib":
store = backends.CfGribDataStore(
filename_or_obj, lock=lock, **backend_kwargs
)
else:
if engine not in [None, "scipy", "h5netcdf"]:
raise ValueError(
"can only read bytes or file-like objects "
"with engine='scipy' or 'h5netcdf'"
)
engine = _get_engine_from_magic_number(filename_or_obj)
if engine == "scipy":
store = backends.ScipyDataStore(filename_or_obj, **backend_kwargs)
elif engine == "h5netcdf":
store = backends.H5NetCDFStore.open(
filename_or_obj, group=group, lock=lock, **backend_kwargs
)
with close_on_error(store):
ds = maybe_decode_store(store)
# Ensure source filename always stored in dataset object (GH issue #2550)
if "source" not in ds.encoding:
if isinstance(filename_or_obj, str):
ds.encoding["source"] = filename_or_obj
return ds
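# Usage sketch for open_dataset(); the file name, variable name and chunk
# sizes below are hypothetical, and the ``chunks`` argument requires dask:
#
#     ds = open_dataset(
#         "ocean.nc",
#         engine="netcdf4",              # or omit to let xarray pick a default
#         chunks={"time": 100},          # lazily load variables as dask arrays
#         drop_variables=["bad_var"],    # skip variables that fail to decode
#     )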
def open_dataarray(
filename_or_obj,
group=None,
decode_cf=True,
mask_and_scale=None,
decode_times=True,
autoclose=None,
concat_characters=True,
decode_coords=True,
engine=None,
chunks=None,
lock=None,
cache=None,
drop_variables=None,
backend_kwargs=None,
use_cftime=None,
):
"""Open an DataArray from a file or file-like object containing a single
data variable.
This is designed to read netCDF files with only one data variable. If
multiple variables are present then a ValueError is raised.
Parameters
----------
filename_or_obj : str, Path, file or xarray.backends.*DataStore
Strings and Paths are interpreted as a path to a netCDF file or an
OpenDAP URL and opened with python-netCDF4, unless the filename ends
with .gz, in which case the file is gunzipped and opened with
scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
group : str, optional
Path to the netCDF4 group in the given file to open (only works for
netCDF4 files).
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA. mask_and_scale defaults to True except for the
pseudonetcdf backend.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
decode_coords : bool, optional
If True, decode the 'coordinates' attribute to identify coordinates in
the resulting dataset.
engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, \
optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
'netcdf4'.
chunks : int or dict, optional
        If chunks is provided, it is used to load the new dataset into dask
arrays.
lock : False or duck threading.Lock, optional
Resource lock to use when reading data from disk. Only relevant when
using dask or another form of parallelism. By default, appropriate
locks are chosen to safely read and write files with the currently
active dask scheduler.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False. Does not
change the behavior of coordinates corresponding to dimensions, which
always load their data from disk into a ``pandas.Index``.
drop_variables: string or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
backend_kwargs: dictionary, optional
A dictionary of keyword arguments to pass on to the backend. This
may be useful when backend options would improve performance or
allow user control of dataset processing.
use_cftime: bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error.
Notes
-----
This is designed to be fully compatible with `DataArray.to_netcdf`. Saving
using `DataArray.to_netcdf` and then loading with this function will
produce an identical result.
All parameters are passed directly to `xarray.open_dataset`. See that
documentation for further details.
See also
--------
open_dataset
"""
dataset = open_dataset(
filename_or_obj,
group=group,
decode_cf=decode_cf,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
autoclose=autoclose,
concat_characters=concat_characters,
decode_coords=decode_coords,
engine=engine,
chunks=chunks,
lock=lock,
cache=cache,
drop_variables=drop_variables,
backend_kwargs=backend_kwargs,
use_cftime=use_cftime,
)
if len(dataset.data_vars) != 1:
raise ValueError(
"Given file dataset contains more than one data "
"variable. Please read with xarray.open_dataset and "
"then select the variable you want."
)
else:
(data_array,) = dataset.data_vars.values()
data_array._file_obj = dataset._file_obj
# Reset names if they were changed during saving
# to ensure that we can 'roundtrip' perfectly
if DATAARRAY_NAME in dataset.attrs:
data_array.name = dataset.attrs[DATAARRAY_NAME]
del dataset.attrs[DATAARRAY_NAME]
if data_array.name == DATAARRAY_VARIABLE:
data_array.name = None
return data_array
class _MultiFileCloser:
__slots__ = ("file_objs",)
def __init__(self, file_objs):
self.file_objs = file_objs
def close(self):
for f in self.file_objs:
f.close()
def open_mfdataset(
paths,
chunks=None,
concat_dim="_not_supplied",
compat="no_conflicts",
preprocess=None,
engine=None,
lock=None,
data_vars="all",
coords="different",
combine="_old_auto",
autoclose=None,
parallel=False,
join="outer",
attrs_file=None,
**kwargs,
):
"""Open multiple files as a single dataset.
If combine='by_coords' then the function ``combine_by_coords`` is used to combine
the datasets into one before returning the result, and if combine='nested' then
``combine_nested`` is used. The filepaths must be structured according to which
combining function is used, the details of which are given in the documentation for
``combine_by_coords`` and ``combine_nested``. By default the old (now deprecated)
``auto_combine`` will be used, please specify either ``combine='by_coords'`` or
``combine='nested'`` in future. Requires dask to be installed. See documentation for
details on dask [1]_. Global attributes from the ``attrs_file`` are used
for the combined dataset.
Parameters
----------
paths : str or sequence
Either a string glob in the form ``"path/to/my/files/*.nc"`` or an explicit list of
files to open. Paths can be given as strings or as pathlib Paths. If
concatenation along more than one dimension is desired, then ``paths`` must be a
        nested list-of-lists (see ``combine_nested`` for details). (A string glob will
be expanded to a 1-dimensional list.)
chunks : int or dict, optional
Dictionary with keys given by dimension names and values given by chunk sizes.
In general, these should divide the dimensions of each dataset. If int, chunk
each dimension by ``chunks``. By default, chunks will be chosen to load entire
input files into memory at once. This has a major impact on performance: please
see the full documentation for more details [2]_.
concat_dim : str, or list of str, DataArray, Index or None, optional
Dimensions to concatenate files along. You only need to provide this argument
if any of the dimensions along which you want to concatenate is not a dimension
in the original datasets, e.g., if you want to stack a collection of 2D arrays
along a third dimension. Set ``concat_dim=[..., None, ...]`` explicitly to
disable concatenation along a particular dimension.
combine : {'by_coords', 'nested'}, optional
Whether ``xarray.combine_by_coords`` or ``xarray.combine_nested`` is used to
combine all the data. If this argument is not provided, `xarray.auto_combine` is
used, but in the future this behavior will switch to use
`xarray.combine_by_coords` by default.
compat : {'identical', 'equals', 'broadcast_equals',
'no_conflicts', 'override'}, optional
String indicating how to compare variables of the same name for
potential conflicts when merging:
* 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
* 'equals': all values and dimensions must be the same.
* 'identical': all values, dimensions and attributes must be the
same.
* 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
* 'override': skip comparing and pick variable from first dataset
preprocess : callable, optional
If provided, call this function on each dataset prior to concatenation.
You can find the file-name from which each dataset was loaded in
``ds.encoding['source']``.
engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, \
optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
'netcdf4'.
lock : False or duck threading.Lock, optional
Resource lock to use when reading data from disk. Only relevant when
using dask or another form of parallelism. By default, appropriate
locks are chosen to safely read and write files with the currently
active dask scheduler.
data_vars : {'minimal', 'different', 'all' or list of str}, optional
These data variables will be concatenated together:
* 'minimal': Only data variables in which the dimension already
appears are included.
* 'different': Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* 'all': All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the 'minimal' data variables.
coords : {'minimal', 'different', 'all' or list of str}, optional
These coordinate variables will be concatenated together:
* 'minimal': Only coordinates in which the dimension already appears
are included.
* 'different': Coordinates which are not equal (ignoring attributes)
across all datasets are also concatenated (as well as all for which
dimension already appears). Beware: this option may load the data
payload of coordinate variables into memory if they are not already
loaded.
* 'all': All coordinate variables will be concatenated, except
those corresponding to other dimensions.
* list of str: The listed coordinate variables will be concatenated,
          in addition to the 'minimal' coordinates.
parallel : bool, optional
If True, the open and preprocess steps of this function will be
performed in parallel using ``dask.delayed``. Default is False.
    join : {'outer', 'inner', 'left', 'right', 'exact', 'override'}, optional
String indicating how to combine differing indexes
(excluding concat_dim) in objects
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- 'override': if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
attrs_file : str or pathlib.Path, optional
Path of the file used to read global attributes from.
By default global attributes are read from the first file provided,
with wildcard matches sorted by filename.
**kwargs : optional
Additional arguments passed on to :py:func:`xarray.open_dataset`.
Returns
-------
xarray.Dataset
Notes
-----
``open_mfdataset`` opens files with read-only access. When you modify values
of a Dataset, even one linked to files on disk, only the in-memory copy you
are manipulating in xarray is modified: the original file on disk is never
touched.
See Also
--------
combine_by_coords
combine_nested
auto_combine
open_dataset
References
----------
.. [1] http://xarray.pydata.org/en/stable/dask.html
.. [2] http://xarray.pydata.org/en/stable/dask.html#chunking-and-performance
"""
if isinstance(paths, str):
if is_remote_uri(paths):
raise ValueError(
"cannot do wild-card matching for paths that are remote URLs: "
"{!r}. Instead, supply paths as an explicit list of strings.".format(
paths
)
)
paths = sorted(glob(paths))
else:
paths = [str(p) if isinstance(p, Path) else p for p in paths]
if not paths:
raise OSError("no files to open")
# If combine='by_coords' then this is unnecessary, but quick.
# If combine='nested' then this creates a flat list which is easier to
# iterate over, while saving the originally-supplied structure as "ids"
if combine == "nested":
if str(concat_dim) == "_not_supplied":
raise ValueError("Must supply concat_dim when using " "combine='nested'")
else:
if isinstance(concat_dim, (str, DataArray)) or concat_dim is None:
concat_dim = [concat_dim]
combined_ids_paths = _infer_concat_order_from_positions(paths)
ids, paths = (list(combined_ids_paths.keys()), list(combined_ids_paths.values()))
open_kwargs = dict(
engine=engine, chunks=chunks or {}, lock=lock, autoclose=autoclose, **kwargs
)
if parallel:
import dask
# wrap the open_dataset, getattr, and preprocess with delayed
open_ = dask.delayed(open_dataset)
getattr_ = dask.delayed(getattr)
if preprocess is not None:
preprocess = dask.delayed(preprocess)
else:
open_ = open_dataset
getattr_ = getattr
datasets = [open_(p, **open_kwargs) for p in paths]
file_objs = [getattr_(ds, "_file_obj") for ds in datasets]
if preprocess is not None:
datasets = [preprocess(ds) for ds in datasets]
if parallel:
# calling compute here will return the datasets/file_objs lists,
# the underlying datasets will still be stored as dask arrays
datasets, file_objs = dask.compute(datasets, file_objs)
# Combine all datasets, closing them in case of a ValueError
try:
if combine == "_old_auto":
# Use the old auto_combine for now
# Remove this after deprecation cycle from #2616 is complete
basic_msg = dedent(
"""\
In xarray version 0.15 the default behaviour of `open_mfdataset`
will change. To retain the existing behavior, pass
combine='nested'. To use future default behavior, pass
combine='by_coords'. See
http://xarray.pydata.org/en/stable/combining.html#combining-multi
"""
)
warnings.warn(basic_msg, FutureWarning, stacklevel=2)
combined = auto_combine(
datasets,
concat_dim=concat_dim,
compat=compat,
data_vars=data_vars,
coords=coords,
join=join,
from_openmfds=True,
)
elif combine == "nested":
# Combined nested list by successive concat and merge operations
# along each dimension, using structure given by "ids"
combined = _nested_combine(
datasets,
concat_dims=concat_dim,
compat=compat,
data_vars=data_vars,
coords=coords,
ids=ids,
join=join,
)
elif combine == "by_coords":
# Redo ordering from coordinates, ignoring how they were ordered
# previously
combined = combine_by_coords(
datasets, compat=compat, data_vars=data_vars, coords=coords, join=join
)
else:
raise ValueError(
"{} is an invalid option for the keyword argument"
" ``combine``".format(combine)
)
except ValueError:
for ds in datasets:
ds.close()
raise
combined._file_obj = _MultiFileCloser(file_objs)
# read global attributes from the attrs_file or from the first dataset
if attrs_file is not None:
if isinstance(attrs_file, Path):
attrs_file = str(attrs_file)
combined.attrs = datasets[paths.index(attrs_file)].attrs
else:
combined.attrs = datasets[0].attrs
return combined
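# Usage sketch for open_mfdataset(); the glob pattern is hypothetical and dask
# must be installed:
#
#     ds = open_mfdataset(
#         "data/*.nc",
#         combine="by_coords",    # avoid the deprecated auto_combine default
#         parallel=True,          # open/preprocess files with dask.delayed
#     )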
WRITEABLE_STORES: Dict[str, Callable] = {
"netcdf4": backends.NetCDF4DataStore.open,
"scipy": backends.ScipyDataStore,
"h5netcdf": backends.H5NetCDFStore.open,
}
def to_netcdf(
dataset: Dataset,
path_or_file=None,
mode: str = "w",
format: str = None,
group: str = None,
engine: str = None,
encoding: Mapping = None,
unlimited_dims: Iterable[Hashable] = None,
compute: bool = True,
multifile: bool = False,
invalid_netcdf: bool = False,
) -> Union[Tuple[ArrayWriter, AbstractDataStore], bytes, "Delayed", None]:
"""This function creates an appropriate datastore for writing a dataset to
disk as a netCDF file
See `Dataset.to_netcdf` for full API docs.
The ``multifile`` argument is only for the private use of save_mfdataset.
"""
if isinstance(path_or_file, Path):
path_or_file = str(path_or_file)
if encoding is None:
encoding = {}
if path_or_file is None:
if engine is None:
engine = "scipy"
elif engine != "scipy":
raise ValueError(
"invalid engine for creating bytes with "
"to_netcdf: %r. Only the default engine "
"or engine='scipy' is supported" % engine
)
if not compute:
raise NotImplementedError(
"to_netcdf() with compute=False is not yet implemented when "
"returning bytes"
)
elif isinstance(path_or_file, str):
if engine is None:
engine = _get_default_engine(path_or_file)
path_or_file = _normalize_path(path_or_file)
else: # file-like object
engine = "scipy"
# validate Dataset keys, DataArray names, and attr keys/values
_validate_dataset_names(dataset)
_validate_attrs(dataset)
try:
store_open = WRITEABLE_STORES[engine]
except KeyError:
raise ValueError("unrecognized engine for to_netcdf: %r" % engine)
if format is not None:
format = format.upper()
# handle scheduler specific logic
scheduler = _get_scheduler()
have_chunks = any(v.chunks for v in dataset.variables.values())
autoclose = have_chunks and scheduler in ["distributed", "multiprocessing"]
if autoclose and engine == "scipy":
raise NotImplementedError(
"Writing netCDF files with the %s backend "
"is not currently supported with dask's %s "
"scheduler" % (engine, scheduler)
)
target = path_or_file if path_or_file is not None else BytesIO()
kwargs = dict(autoclose=True) if autoclose else {}
if invalid_netcdf:
if engine == "h5netcdf":
kwargs["invalid_netcdf"] = invalid_netcdf
else:
raise ValueError(
"unrecognized option 'invalid_netcdf' for engine %s" % engine
)
store = store_open(target, mode, format, group, **kwargs)
if unlimited_dims is None:
unlimited_dims = dataset.encoding.get("unlimited_dims", None)
if unlimited_dims is not None:
if isinstance(unlimited_dims, str) or not isinstance(unlimited_dims, Iterable):
unlimited_dims = [unlimited_dims]
else:
unlimited_dims = list(unlimited_dims)
writer = ArrayWriter()
# TODO: figure out how to refactor this logic (here and in save_mfdataset)
# to avoid this mess of conditionals
try:
# TODO: allow this work (setting up the file for writing array data)
# to be parallelized with dask
dump_to_store(
dataset, store, writer, encoding=encoding, unlimited_dims=unlimited_dims
)
if autoclose:
store.close()
if multifile:
return writer, store
writes = writer.sync(compute=compute)
if path_or_file is None:
store.sync()
return target.getvalue()
finally:
if not multifile and compute:
store.close()
if not compute:
import dask
return dask.delayed(_finalize_store)(writes, store)
return None
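# Illustrative sketch (not part of the original module): how the public
# ``Dataset.to_netcdf`` API, which delegates to the helper above, behaves
# with and without a target path. The file name "example.nc" is a
# hypothetical placeholder.
def _demo_to_netcdf_usage():
    import numpy as np
    import xarray as xr

    ds = xr.Dataset({"t": ("x", np.arange(3))})
    # With a path, the engine is chosen from the installed backends.
    ds.to_netcdf("example.nc")
    # Without a path, the scipy engine is used and netCDF3 bytes are returned.
    return ds.to_netcdf()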
def dump_to_store(
dataset, store, writer=None, encoder=None, encoding=None, unlimited_dims=None
):
"""Store dataset contents to a backends.*DataStore object."""
if writer is None:
writer = ArrayWriter()
if encoding is None:
encoding = {}
variables, attrs = conventions.encode_dataset_coordinates(dataset)
check_encoding = set()
for k, enc in encoding.items():
# no need to shallow copy the variable again; that already happened
# in encode_dataset_coordinates
variables[k].encoding = enc
check_encoding.add(k)
if encoder:
variables, attrs = encoder(variables, attrs)
store.store(variables, attrs, check_encoding, writer, unlimited_dims=unlimited_dims)
def save_mfdataset(
datasets, paths, mode="w", format=None, groups=None, engine=None, compute=True
):
"""Write multiple datasets to disk as netCDF files simultaneously.
This function is intended for use with datasets consisting of dask.array
objects, in which case it can write the multiple datasets to disk
simultaneously using a shared thread pool.
When not using dask, it is no different than calling ``to_netcdf``
repeatedly.
Parameters
----------
datasets : list of xarray.Dataset
List of datasets to save.
paths : list of str or list of Paths
List of paths to which to save each corresponding dataset.
mode : {'w', 'a'}, optional
Write ('w') or append ('a') mode. If mode='w', any existing file at
these locations will be overwritten.
format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT',
'NETCDF3_CLASSIC'}, optional
File format for the resulting netCDF file:
* NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
features.
* NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
netCDF 3 compatible API features.
* NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
which fully supports 2+ GB files, but is only compatible with
clients linked against netCDF version 3.6.0 or later.
* NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
handle 2+ GB files very well.
All formats are supported by the netCDF4-python library.
scipy.io.netcdf only supports the last two formats.
The default format is NETCDF4 if you are saving a file to disk and
have the netCDF4-python library available. Otherwise, xarray falls
back to using scipy to write netCDF files and defaults to the
NETCDF3_64BIT format (scipy does not support netCDF4).
groups : list of str, optional
Paths to the netCDF4 group in each corresponding file to which to save
datasets (only works for format='NETCDF4'). The groups will be created
if necessary.
engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional
Engine to use when writing netCDF files. If not provided, the
default engine is chosen based on available dependencies, with a
preference for 'netcdf4' if writing to a file on disk.
See `Dataset.to_netcdf` for additional information.
compute: boolean
If true compute immediately, otherwise return a
``dask.delayed.Delayed`` object that can be computed later.
Examples
--------
Save a dataset into one netCDF per year of data:
>>> years, datasets = zip(*ds.groupby("time.year"))
>>> paths = ["%s.nc" % y for y in years]
>>> xr.save_mfdataset(datasets, paths)
"""
if mode == "w" and len(set(paths)) < len(paths):
raise ValueError(
"cannot use mode='w' when writing multiple " "datasets to the same path"
)
for obj in datasets:
if not isinstance(obj, Dataset):
raise TypeError(
"save_mfdataset only supports writing Dataset "
"objects, received type %s" % type(obj)
)
if groups is None:
groups = [None] * len(datasets)
if len({len(datasets), len(paths), len(groups)}) > 1:
raise ValueError(
"must supply lists of the same length for the "
"datasets, paths and groups arguments to "
"save_mfdataset"
)
writers, stores = zip(
*[
to_netcdf(
ds, path, mode, format, group, engine, compute=compute, multifile=True
)
for ds, path, group in zip(datasets, paths, groups)
]
)
try:
writes = [w.sync(compute=compute) for w in writers]
finally:
if compute:
for store in stores:
store.close()
if not compute:
import dask
return dask.delayed(
[dask.delayed(_finalize_store)(w, s) for w, s in zip(writes, stores)]
)
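# Illustrative sketch (not part of the original module): deferring the writes
# from ``save_mfdataset`` with ``compute=False`` and triggering them later.
# Assumes dask is installed; the year-based file names are hypothetical.
def _demo_save_mfdataset_delayed(ds):
    import xarray as xr

    years, datasets = zip(*ds.groupby("time.year"))
    paths = ["%s.nc" % y for y in years]
    # A dask.delayed object is returned; nothing is written yet.
    delayed = xr.save_mfdataset(datasets, paths, compute=False)
    delayed.compute()  # perform all writes, sharing one thread pool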
def _validate_datatypes_for_zarr_append(dataset):
"""DataArray.name and Dataset keys must be a string or None"""
def check_dtype(var):
if (
not np.issubdtype(var.dtype, np.number)
and not np.issubdtype(var.dtype, np.datetime64)
and not np.issubdtype(var.dtype, np.bool_)
and not coding.strings.is_unicode_dtype(var.dtype)
and not var.dtype == object
):
# and not re.match('^bytes[1-9]+$', var.dtype.name)):
raise ValueError(
"Invalid dtype for data variable: {} "
"dtype must be a subtype of number, "
"datetime, bool, a fixed sized string, "
"a fixed size unicode string or an "
"object".format(var)
)
for k in dataset.data_vars.values():
check_dtype(k)
def _validate_append_dim_and_encoding(
ds_to_append, store, append_dim, encoding, **open_kwargs
):
try:
ds = backends.zarr.open_zarr(store, **open_kwargs)
except ValueError: # store empty
return
if append_dim:
if append_dim not in ds.dims:
raise ValueError(
f"append_dim={append_dim!r} does not match any existing "
f"dataset dimensions {ds.dims}"
)
for var_name in ds_to_append:
if var_name in ds:
if ds_to_append[var_name].dims != ds[var_name].dims:
raise ValueError(
f"variable {var_name!r} already exists with different "
f"dimension names {ds[var_name].dims} != "
f"{ds_to_append[var_name].dims}, but changing variable "
"dimensions is not supported by to_zarr()."
)
existing_sizes = {
k: v for k, v in ds[var_name].sizes.items() if k != append_dim
}
new_sizes = {
k: v for k, v in ds_to_append[var_name].sizes.items() if k != append_dim
}
if existing_sizes != new_sizes:
raise ValueError(
f"variable {var_name!r} already exists with different "
"dimension sizes: {existing_sizes} != {new_sizes}. "
"to_zarr() only supports changing dimension sizes when "
f"explicitly appending, but append_dim={append_dim!r}."
)
if var_name in encoding.keys():
raise ValueError(
f"variable {var_name!r} already exists, but encoding was provided"
)
def to_zarr(
dataset,
store=None,
mode=None,
synchronizer=None,
group=None,
encoding=None,
compute=True,
consolidated=False,
append_dim=None,
):
"""This function creates an appropriate datastore for writing a dataset to
a zarr store
See `Dataset.to_zarr` for full API docs.
"""
if isinstance(store, Path):
store = str(store)
if encoding is None:
encoding = {}
# validate Dataset keys, DataArray names, and attr keys/values
_validate_dataset_names(dataset)
_validate_attrs(dataset)
if mode == "a":
_validate_datatypes_for_zarr_append(dataset)
_validate_append_dim_and_encoding(
dataset,
store,
append_dim,
group=group,
consolidated=consolidated,
encoding=encoding,
)
zstore = backends.ZarrStore.open_group(
store=store,
mode=mode,
synchronizer=synchronizer,
group=group,
consolidate_on_close=consolidated,
)
zstore.append_dim = append_dim
writer = ArrayWriter()
# TODO: figure out how to properly handle unlimited_dims
dump_to_store(dataset, zstore, writer, encoding=encoding)
writes = writer.sync(compute=compute)
if compute:
_finalize_store(writes, zstore)
else:
import dask
return dask.delayed(_finalize_store)(writes, zstore)
return zstore
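# Illustrative sketch (not part of the original module): appending along an
# existing dimension with the public ``Dataset.to_zarr`` API, which is the
# code path validated by _validate_append_dim_and_encoding above. The store
# path "example.zarr" is hypothetical; zarr must be installed.
def _demo_to_zarr_append(ds_first, ds_next):
    ds_first.to_zarr("example.zarr", mode="w")
    # Variables must keep their dimension names and non-appended sizes.
    ds_next.to_zarr("example.zarr", mode="a", append_dim="time")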
|
shoyer/xarray
|
xarray/backends/api.py
|
Python
|
apache-2.0
| 50,649
|
[
"NetCDF"
] |
6f175f68b5caae8447f3736650244f7121fbe66096e7f34a154b4523c6537052
|
class NodeVisitor(object):
def visit(self, node):
method_name = 'visit_' + type(node).__name__
visitor = getattr(self, method_name, self.generic_visit)
return visitor(node)
def generic_visit(self, node):
raise Exception('No visit_{} method'.format(type(node).__name__))
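# Illustrative sketch (not part of the original file): a concrete visitor
# built on NodeVisitor. The Num/BinOp node classes below are hypothetical
# stand-ins for whatever AST node types the compiler defines; dispatch works
# by node class name via the visit_<ClassName> convention implemented above.
class Num(object):
    def __init__(self, value):
        self.value = value


class BinOp(object):
    def __init__(self, left, op, right):
        self.left, self.op, self.right = left, op, right


class Evaluator(NodeVisitor):
    def visit_Num(self, node):
        return node.value

    def visit_BinOp(self, node):
        left = self.visit(node.left)
        right = self.visit(node.right)
        return left + right if node.op == '+' else left - right

# Evaluator().visit(BinOp(Num(1), '+', Num(2))) evaluates to 3.
# Visiting an unknown node type falls back to generic_visit and raises.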
|
doubledherin/my_compiler
|
node_visitor.py
|
Python
|
mit
| 311
|
[
"VisIt"
] |
93276c3c81f4c00f9fbc3fdea3ff0c42ea60a1adaff4930cb2c655072ac7a274
|
# coding: utf-8
# Copyright (c) Henniggroup.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
"""
Reads in a vasprun.xml file and plots the density of states.
"""
# To use matplotlib on Hipergator, uncomment the following 2 lines:
# import matplotlib
# matplotlib.use('Agg')
from pymatgen.io.vasp.outputs import Vasprun
from pymatgen.electronic_structure.plotter import DosPlotter
if __name__ == "__main__":
# read in the density of states from the vasprun.xml file
run = Vasprun("vasprun.xml", parse_projected_eigen=True)
complete_dos = run.complete_dos
print('cbm and vbm ', complete_dos.get_cbm_vbm())
print('gap = ', complete_dos.get_gap())
# get orbital projected DOS.
spd_dos = complete_dos.get_spd_dos()
plotter = DosPlotter()
plotter.add_dos_dict(spd_dos)
plotter.save_plot('dos.eps')
|
henniggroup/MPInterfaces
|
examples/dos.py
|
Python
|
mit
| 925
|
[
"VASP",
"pymatgen"
] |
87b90121fe71e412a3136e6814ff0acbf71b6da8d661dfed291f9f65850ccaaa
|
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
from .externals.six import string_types
import numpy as np
import os
import os.path as op
from scipy import sparse, linalg
from scipy.spatial.distance import cdist
from copy import deepcopy
from .io.constants import FIFF
from .io.tree import dir_tree_find
from .io.tag import find_tag, read_tag
from .io.open import fiff_open
from .io.write import (start_block, end_block, write_int,
write_float_sparse_rcs, write_string,
write_float_matrix, write_int_matrix,
write_coord_trans, start_file, end_file, write_id)
from .surface import (read_surface, _create_surf_spacing, _get_ico_surface,
_tessellate_sphere_surf, read_bem_surfaces,
_read_surface_geom, _normalize_vectors,
_complete_surface_info, _compute_nearest,
fast_cross_3d)
from .source_estimate import mesh_dist
from .utils import (get_subjects_dir, run_subprocess, has_freesurfer,
has_nibabel, check_fname, logger, verbose,
check_scipy_version)
from .fixes import in1d, partial, gzip_open
from .parallel import parallel_func, check_n_jobs
from .transforms import (invert_transform, apply_trans, _print_coord_trans,
combine_transforms, _get_mri_head_t_from_trans_file,
read_trans, _coord_frame_name)
def _get_lut():
"""Helper to get the FreeSurfer LUT"""
data_dir = op.join(op.dirname(__file__), 'data')
lut_fname = op.join(data_dir, 'FreeSurferColorLUT.txt')
return np.genfromtxt(lut_fname, dtype=None,
usecols=(0, 1), names=['id', 'name'])
def _get_lut_id(lut, label, use_lut):
"""Helper to convert a label to a LUT ID number"""
if not use_lut:
return 1
assert isinstance(label, string_types)
mask = (lut['name'] == label.encode('utf-8'))
assert mask.sum() == 1
return lut['id'][mask]
class SourceSpaces(list):
"""Represent a list of source space
Currently implemented as a list of dictionaries containing the source
space information
Parameters
----------
source_spaces : list
A list of dictionaries containing the source space information.
info : dict
Dictionary with information about the creation of the source space
file. Has keys 'working_dir' and 'command_line'.
Attributes
----------
info : dict
Dictionary with information about the creation of the source space
file. Has keys 'working_dir' and 'command_line'.
"""
def __init__(self, source_spaces, info=None):
super(SourceSpaces, self).__init__(source_spaces)
if info is None:
self.info = dict()
else:
self.info = dict(info)
def __repr__(self):
ss_repr = []
for ss in self:
ss_type = ss['type']
if ss_type == 'vol':
if 'seg_name' in ss:
r = ("'vol' (%s), n_used=%i"
% (ss['seg_name'], ss['nuse']))
else:
r = ("'vol', shape=%s, n_used=%i"
% (repr(ss['shape']), ss['nuse']))
elif ss_type == 'surf':
r = "'surf', n_vertices=%i, n_used=%i" % (ss['np'], ss['nuse'])
else:
r = "%r" % ss_type
coord_frame = ss['coord_frame']
if isinstance(coord_frame, np.ndarray):
coord_frame = coord_frame[0]
r += ', coordinate_frame=%s' % _coord_frame_name(coord_frame)
ss_repr.append('<%s>' % r)
ss_repr = ', '.join(ss_repr)
return "<SourceSpaces: [{ss}]>".format(ss=ss_repr)
def __add__(self, other):
return SourceSpaces(list.__add__(self, other))
def copy(self):
"""Make a copy of the source spaces
Returns
-------
src : instance of SourceSpaces
The copied source spaces.
"""
src = deepcopy(self)
return src
def save(self, fname):
"""Save the source spaces to a fif file
Parameters
----------
fname : str
File to write.
"""
write_source_spaces(fname, self)
@verbose
def export_volume(self, fname, include_surfaces=True,
include_discrete=True, dest='mri', trans=None,
mri_resolution=False, use_lut=True, verbose=None):
"""Exports source spaces to nifti or mgz file
Parameters
----------
fname : str
Name of nifti or mgz file to write.
include_surfaces : bool
If True, include surface source spaces.
include_discrete : bool
If True, include discrete source spaces.
dest : 'mri' | 'surf'
If 'mri' the volume is defined in the coordinate system of the
original T1 image. If 'surf' the coordinate system of the
FreeSurfer surface is used (Surface RAS).
trans : dict, str, or None
Either a transformation filename (usually made using mne_analyze)
or an info dict (usually opened using read_trans()).
If string, an ending of `.fif` or `.fif.gz` will be assumed to be
in FIF format; any other ending will be assumed to be a text file
with a 4x4 transformation matrix (like the `--trans` MNE-C option).
Must be provided if source spaces are in head coordinates and
include_surfaces and mri_resolution are True.
mri_resolution : bool
If True, the image is saved in MRI resolution
(e.g. 256 x 256 x 256).
use_lut : bool
If True, assigns a numeric value to each source space that
corresponds to a color on the freesurfer lookup table.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Notes
-----
This method requires nibabel.
"""
# import nibabel or raise error
try:
import nibabel as nib
except ImportError:
raise ImportError('This function requires nibabel.')
# Check coordinate frames of each source space
coord_frames = np.array([s['coord_frame'] for s in self])
# Raise error if trans is not provided when head coordinates are used
# and mri_resolution and include_surfaces are true
if (coord_frames == FIFF.FIFFV_COORD_HEAD).all():
coords = 'head' # all sources in head coordinates
if mri_resolution and include_surfaces:
if trans is None:
raise ValueError('trans containing mri to head transform '
'must be provided if mri_resolution and '
'include_surfaces are true and surfaces '
'are in head coordinates')
elif trans is not None:
logger.info('trans is not needed and will not be used unless '
'include_surfaces and mri_resolution are True.')
elif (coord_frames == FIFF.FIFFV_COORD_MRI).all():
coords = 'mri' # all sources in mri coordinates
if trans is not None:
logger.info('trans is not needed and will not be used unless '
'sources are in head coordinates.')
# Raise error if all sources are not in the same space, or sources are
# not in mri or head coordinates
else:
raise ValueError('All sources must be in head coordinates or all '
'sources must be in mri coordinates.')
# use lookup table to assign values to source spaces
logger.info('Reading FreeSurfer lookup table')
# read the lookup table
lut = _get_lut()
# Setup a dictionary of source types
src_types = dict(volume=[], surface=[], discrete=[])
# Populate dictionary of source types
for src in self:
# volume sources
if src['type'] == 'vol':
src_types['volume'].append(src)
# surface sources
elif src['type'] == 'surf':
src_types['surface'].append(src)
# discrete sources
elif src['type'] == 'discrete':
src_types['discrete'].append(src)
# raise an error if dealing with source type other than volume
# surface or discrete
else:
raise ValueError('Unrecognized source type: %s.' % src['type'])
# Get shape, inuse array and interpolation matrix from volume sources
first_vol = True # mark the first volume source
# Loop through the volume sources
for vs in src_types['volume']:
# read the lookup table value for segmented volume
if 'seg_name' not in vs:
raise ValueError('Volume sources should be segments, '
'not the entire volume.')
# find the color value for this volume
i = _get_lut_id(lut, vs['seg_name'], use_lut)
if first_vol:
# get the inuse array
if mri_resolution:
# read the mri file used to generate volumes
aseg = nib.load(vs['mri_file'])
# get the voxel space shape
shape3d = (vs['mri_height'], vs['mri_depth'],
vs['mri_width'])
# get the values for this volume
inuse = i * (aseg.get_data() == i).astype(int)
# store as 1D array
inuse = inuse.ravel((2, 1, 0))
else:
inuse = i * vs['inuse']
# get the volume source space shape
shape = vs['shape']
# read the shape in reverse order
# (otherwise results are scrambled)
shape3d = (shape[2], shape[1], shape[0])
first_vol = False
else:
# update the inuse array
if mri_resolution:
# get the values for this volume
use = i * (aseg.get_data() == i).astype(int)
inuse += use.ravel((2, 1, 0))
else:
inuse += i * vs['inuse']
# Raise error if there are no volume source spaces
if first_vol:
raise ValueError('Source spaces must contain at least one volume.')
# create 3d grid in the MRI_VOXEL coordinate frame
# len of inuse array should match shape regardless of mri_resolution
assert len(inuse) == np.prod(shape3d)
# setup the image in 3d space
img = inuse.reshape(shape3d).T
# include surface and/or discrete source spaces
if include_surfaces or include_discrete:
# setup affine transform for source spaces
if mri_resolution:
# get the MRI to MRI_VOXEL transform
affine = invert_transform(vs['vox_mri_t'])
else:
# get the MRI to SOURCE (MRI_VOXEL) transform
affine = invert_transform(vs['src_mri_t'])
# modify affine if in head coordinates
if coords == 'head':
# read transformation
if isinstance(trans, string_types):
if not op.isfile(trans):
raise IOError('trans file "%s" not found' % trans)
if op.splitext(trans)[1] in ['.fif', '.gz']:
mri_head_t = read_trans(trans)
else:
mri_head_t = _get_mri_head_t_from_trans_file(trans)
else: # dict
mri_head_t = trans
# make sure its an MRI to HEAD transform
if mri_head_t['from'] == FIFF.FIFFV_COORD_HEAD:
mri_head_t = invert_transform(mri_head_t)
if not (mri_head_t['from'] == FIFF.FIFFV_COORD_MRI and
mri_head_t['to'] == FIFF.FIFFV_COORD_HEAD):
raise RuntimeError('Incorrect MRI transform provided')
# get the HEAD to MRI transform
head_mri_t = invert_transform(mri_head_t)
# combine transforms, from HEAD to MRI_VOXEL
affine = combine_transforms(head_mri_t, affine,
FIFF.FIFFV_COORD_HEAD,
FIFF.FIFFV_MNE_COORD_MRI_VOXEL)
# loop through the surface source spaces
if include_surfaces:
# get the surface names (assumes left, right order; may want
# to add these names during source space generation)
surf_names = ['Left-Cerebral-Cortex', 'Right-Cerebral-Cortex']
for i, surf in enumerate(src_types['surface']):
# convert vertex positions from their native space
# (either HEAD or MRI) to MRI_VOXEL space
srf_rr = apply_trans(affine['trans'], surf['rr'])
# convert to numeric indices
ix_orig, iy_orig, iz_orig = srf_rr.T.round().astype(int)
# clip indices outside of volume space
ix_clip = np.maximum(np.minimum(ix_orig, shape3d[2]-1), 0)
iy_clip = np.maximum(np.minimum(iy_orig, shape3d[1]-1), 0)
iz_clip = np.maximum(np.minimum(iz_orig, shape3d[0]-1), 0)
# compare original and clipped indices
n_diff = np.array((ix_orig != ix_clip, iy_orig != iy_clip,
iz_orig != iz_clip)).any(0).sum()
# generate use warnings for clipping
if n_diff > 0:
logger.warning('%s surface vertices lay outside '
'of volume space. Consider using a '
'larger volume space.' % n_diff)
# get surface id or use default value
i = _get_lut_id(lut, surf_names[i], use_lut)
# update image to include surface voxels
img[ix_clip, iy_clip, iz_clip] = i
# loop through discrete source spaces
if include_discrete:
for i, disc in enumerate(src_types['discrete']):
# convert vertex positions from their native space
# (either HEAD or MRI) to MRI_VOXEL space
disc_rr = apply_trans(affine['trans'], disc['rr'])
# convert to numeric indices
ix_orig, iy_orig, iz_orig = disc_rr.T.astype(int)
# clip indices outside of volume space
ix_clip = np.maximum(np.minimum(ix_orig, shape3d[2]-1), 0)
iy_clip = np.maximum(np.minimum(iy_orig, shape3d[1]-1), 0)
iz_clip = np.maximum(np.minimum(iz_orig, shape3d[0]-1), 0)
# compare original and clipped indices
n_diff = np.array((ix_orig != ix_clip, iy_orig != iy_clip,
iz_orig != iz_clip)).any(0).sum()
# generate use warnings for clipping
if n_diff > 0:
logger.warning('%s discrete vertices lay outside '
'of volume space. Consider using a '
'larger volume space.' % n_diff)
# set default value
img[ix_clip, iy_clip, iz_clip] = 1
if use_lut:
logger.info('Discrete sources do not have values on '
'the lookup table. Defaulting to 1.')
# calculate affine transform for image (MRI_VOXEL to RAS)
if mri_resolution:
# MRI_VOXEL to MRI transform
transform = vs['vox_mri_t'].copy()
else:
# MRI_VOXEL to MRI transform
# NOTE: 'src' indicates downsampled version of MRI_VOXEL
transform = vs['src_mri_t'].copy()
if dest == 'mri':
# combine with MRI to RAS transform
transform = combine_transforms(transform, vs['mri_ras_t'],
transform['from'],
vs['mri_ras_t']['to'])
# now setup the affine for volume image
affine = transform['trans']
# make sure affine converts from m to mm
affine[:3] *= 1e3
# save volume data
# setup image for file
if fname.endswith(('.nii', '.nii.gz')): # save as nifti
# setup the nifti header
hdr = nib.Nifti1Header()
hdr.set_xyzt_units('mm')
# save the nifti image
img = nib.Nifti1Image(img, affine, header=hdr)
elif fname.endswith('.mgz'): # save as mgh
# convert to float32 (float64 not currently supported)
img = img.astype('float32')
# save the mgh image
img = nib.freesurfer.mghformat.MGHImage(img, affine)
else:
raise(ValueError('Unrecognized file extension'))
# write image to file
nib.save(img, fname)
def _add_patch_info(s):
"""Patch information in a source space
Generate the patch information from the 'nearest' vector in
a source space. For vertex in the source space it provides
the list of neighboring vertices in the high resolution
triangulation.
Parameters
----------
s : dict
The source space.
"""
nearest = s['nearest']
if nearest is None:
s['pinfo'] = None
s['patch_inds'] = None
return
logger.info(' Computing patch statistics...')
indn = np.argsort(nearest)
nearest_sorted = nearest[indn]
steps = np.where(nearest_sorted[1:] != nearest_sorted[:-1])[0] + 1
starti = np.r_[[0], steps]
stopi = np.r_[steps, [len(nearest)]]
pinfo = list()
for start, stop in zip(starti, stopi):
pinfo.append(np.sort(indn[start:stop]))
s['pinfo'] = pinfo
# compute patch indices of the in-use source space vertices
patch_verts = nearest_sorted[steps - 1]
s['patch_inds'] = np.searchsorted(patch_verts, s['vertno'])
logger.info(' Patch information added...')
@verbose
def _read_source_spaces_from_tree(fid, tree, add_geom=False, verbose=None):
"""Read the source spaces from a FIF file
Parameters
----------
fid : file descriptor
An open file descriptor.
tree : dict
The FIF tree structure if source is a file id.
add_geom : bool, optional (default False)
Add geometry information to the surfaces.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
src : SourceSpaces
The source spaces.
"""
# Find all source spaces
spaces = dir_tree_find(tree, FIFF.FIFFB_MNE_SOURCE_SPACE)
if len(spaces) == 0:
raise ValueError('No source spaces found')
src = list()
for s in spaces:
logger.info(' Reading a source space...')
this = _read_one_source_space(fid, s)
logger.info(' [done]')
if add_geom:
_complete_source_space_info(this)
src.append(this)
src = SourceSpaces(src)
logger.info(' %d source spaces read' % len(spaces))
return src
@verbose
def read_source_spaces(fname, add_geom=False, verbose=None):
"""Read the source spaces from a FIF file
Parameters
----------
fname : str
The name of the file, which should end with -src.fif or
-src.fif.gz.
add_geom : bool, optional (default False)
Add geometry information to the surfaces.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
src : SourceSpaces
The source spaces.
"""
# be more permissive on read than write (fwd/inv can contain src)
check_fname(fname, 'source space', ('-src.fif', '-src.fif.gz',
'-fwd.fif', '-fwd.fif.gz',
'-inv.fif', '-inv.fif.gz'))
ff, tree, _ = fiff_open(fname)
with ff as fid:
src = _read_source_spaces_from_tree(fid, tree, add_geom=add_geom,
verbose=verbose)
src.info['fname'] = fname
node = dir_tree_find(tree, FIFF.FIFFB_MNE_ENV)
if node:
node = node[0]
for p in range(node['nent']):
kind = node['directory'][p].kind
pos = node['directory'][p].pos
tag = read_tag(fid, pos)
if kind == FIFF.FIFF_MNE_ENV_WORKING_DIR:
src.info['working_dir'] = tag.data
elif kind == FIFF.FIFF_MNE_ENV_COMMAND_LINE:
src.info['command_line'] = tag.data
return src
@verbose
def _read_one_source_space(fid, this, verbose=None):
"""Read one source space
"""
FIFF_BEM_SURF_NTRI = 3104
FIFF_BEM_SURF_TRIANGLES = 3106
res = dict()
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_ID)
if tag is None:
res['id'] = int(FIFF.FIFFV_MNE_SURF_UNKNOWN)
else:
res['id'] = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_TYPE)
if tag is None:
raise ValueError('Unknown source space type')
else:
src_type = int(tag.data)
if src_type == FIFF.FIFFV_MNE_SPACE_SURFACE:
res['type'] = 'surf'
elif src_type == FIFF.FIFFV_MNE_SPACE_VOLUME:
res['type'] = 'vol'
elif src_type == FIFF.FIFFV_MNE_SPACE_DISCRETE:
res['type'] = 'discrete'
else:
raise ValueError('Unknown source space type (%d)' % src_type)
if res['type'] == 'vol':
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS)
if tag is not None:
res['shape'] = tuple(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_COORD_TRANS)
if tag is not None:
res['src_mri_t'] = tag.data
parent_mri = dir_tree_find(this, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
if len(parent_mri) == 0:
# MNE 2.7.3 (and earlier) didn't store necessary information
# about volume coordinate translations. Although there is a
# FIFF_COORD_TRANS in the higher level of the FIFF file, this
# doesn't contain all the info we need. Safer to return an
# error unless a user really wants us to add backward compat.
raise ValueError('Can not find parent MRI location. The volume '
'source space may have been made with an MNE '
'version that is too old (<= 2.7.3). Consider '
'updating and regenerating the inverse.')
mri = parent_mri[0]
for d in mri['directory']:
if d.kind == FIFF.FIFF_COORD_TRANS:
tag = read_tag(fid, d.pos)
trans = tag.data
if trans['from'] == FIFF.FIFFV_MNE_COORD_MRI_VOXEL:
res['vox_mri_t'] = tag.data
if trans['to'] == FIFF.FIFFV_MNE_COORD_RAS:
res['mri_ras_t'] = tag.data
tag = find_tag(fid, mri, FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR)
if tag is not None:
res['interpolator'] = tag.data
else:
logger.info("Interpolation matrix for MRI not found.")
tag = find_tag(fid, mri, FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE)
if tag is not None:
res['mri_file'] = tag.data
tag = find_tag(fid, mri, FIFF.FIFF_MRI_WIDTH)
if tag is not None:
res['mri_width'] = int(tag.data)
tag = find_tag(fid, mri, FIFF.FIFF_MRI_HEIGHT)
if tag is not None:
res['mri_height'] = int(tag.data)
tag = find_tag(fid, mri, FIFF.FIFF_MRI_DEPTH)
if tag is not None:
res['mri_depth'] = int(tag.data)
tag = find_tag(fid, mri, FIFF.FIFF_MNE_FILE_NAME)
if tag is not None:
res['mri_volume_name'] = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS)
if tag is not None:
nneighbors = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS)
offset = 0
neighbors = []
for n in nneighbors:
neighbors.append(tag.data[offset:offset+n])
offset += n
res['neighbor_vert'] = neighbors
tag = find_tag(fid, this, FIFF.FIFF_COMMENT)
if tag is not None:
res['seg_name'] = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)
if tag is None:
raise ValueError('Number of vertices not found')
res['np'] = int(tag.data)
tag = find_tag(fid, this, FIFF_BEM_SURF_NTRI)
if tag is None:
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NTRI)
if tag is None:
res['ntri'] = 0
else:
res['ntri'] = int(tag.data)
else:
res['ntri'] = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_COORD_FRAME)
if tag is None:
raise ValueError('Coordinate frame information not found')
res['coord_frame'] = tag.data
# Vertices, normals, and triangles
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_POINTS)
if tag is None:
raise ValueError('Vertex data not found')
res['rr'] = tag.data.astype(np.float) # double precision for mayavi
if res['rr'].shape[0] != res['np']:
raise ValueError('Vertex information is incorrect')
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS)
if tag is None:
raise ValueError('Vertex normals not found')
res['nn'] = tag.data
if res['nn'].shape[0] != res['np']:
raise ValueError('Vertex normal information is incorrect')
if res['ntri'] > 0:
tag = find_tag(fid, this, FIFF_BEM_SURF_TRIANGLES)
if tag is None:
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES)
if tag is None:
raise ValueError('Triangulation not found')
else:
res['tris'] = tag.data - 1 # index start at 0 in Python
else:
res['tris'] = tag.data - 1 # index start at 0 in Python
if res['tris'].shape[0] != res['ntri']:
raise ValueError('Triangulation information is incorrect')
else:
res['tris'] = None
# Which vertices are active
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE)
if tag is None:
res['nuse'] = 0
res['inuse'] = np.zeros(res['nuse'], dtype=np.int)
res['vertno'] = None
else:
res['nuse'] = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION)
if tag is None:
raise ValueError('Source selection information missing')
res['inuse'] = tag.data.astype(np.int).T
if len(res['inuse']) != res['np']:
raise ValueError('Incorrect number of entries in source space '
'selection')
res['vertno'] = np.where(res['inuse'])[0]
# Use triangulation
tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI)
tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES)
if tag1 is None or tag2 is None:
res['nuse_tri'] = 0
res['use_tris'] = None
else:
res['nuse_tri'] = tag1.data
res['use_tris'] = tag2.data - 1 # index start at 0 in Python
# Patch-related information
tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST)
tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST)
if tag1 is None or tag2 is None:
res['nearest'] = None
res['nearest_dist'] = None
else:
res['nearest'] = tag1.data
res['nearest_dist'] = tag2.data.T
_add_patch_info(res)
# Distances
tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_DIST)
tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT)
if tag1 is None or tag2 is None:
res['dist'] = None
res['dist_limit'] = None
else:
res['dist'] = tag1.data
res['dist_limit'] = tag2.data
# Add the upper triangle
res['dist'] = res['dist'] + res['dist'].T
if (res['dist'] is not None):
logger.info(' Distance information added...')
tag = find_tag(fid, this, FIFF.FIFF_SUBJ_HIS_ID)
if tag is not None:
res['subject_his_id'] = tag.data
return res
@verbose
def _complete_source_space_info(this, verbose=None):
"""Add more info on surface
"""
# Main triangulation
logger.info(' Completing triangulation info...')
this['tri_area'] = np.zeros(this['ntri'])
r1 = this['rr'][this['tris'][:, 0], :]
r2 = this['rr'][this['tris'][:, 1], :]
r3 = this['rr'][this['tris'][:, 2], :]
this['tri_cent'] = (r1 + r2 + r3) / 3.0
this['tri_nn'] = fast_cross_3d((r2 - r1), (r3 - r1))
size = np.sqrt(np.sum(this['tri_nn'] ** 2, axis=1))
this['tri_area'] = size / 2.0
this['tri_nn'] /= size[:, None]
logger.info('[done]')
# Selected triangles
logger.info(' Completing selection triangulation info...')
if this['nuse_tri'] > 0:
r1 = this['rr'][this['use_tris'][:, 0], :]
r2 = this['rr'][this['use_tris'][:, 1], :]
r3 = this['rr'][this['use_tris'][:, 2], :]
this['use_tri_cent'] = (r1 + r2 + r3) / 3.0
this['use_tri_nn'] = fast_cross_3d((r2 - r1), (r3 - r1))
this['use_tri_area'] = np.sqrt(np.sum(this['use_tri_nn'] ** 2, axis=1)
) / 2.0
logger.info('[done]')
def find_source_space_hemi(src):
"""Return the hemisphere id for a source space
Parameters
----------
src : dict
The source space to investigate
Returns
-------
hemi : int
Deduced hemisphere id
"""
xave = src['rr'][:, 0].sum()
if xave < 0:
hemi = int(FIFF.FIFFV_MNE_SURF_LEFT_HEMI)
else:
hemi = int(FIFF.FIFFV_MNE_SURF_RIGHT_HEMI)
return hemi
def label_src_vertno_sel(label, src):
""" Find vertex numbers and indices from label
Parameters
----------
label : Label
Source space label
src : dict
Source space
Returns
-------
vertno : list of length 2
Vertex numbers for lh and rh
src_sel : array of int (len(idx) = len(vertno[0]) + len(vertno[1]))
Indices of the selected vertices in source space
"""
if src[0]['type'] != 'surf':
raise Exception('Labels are only supported with surface source '
'spaces')
vertno = [src[0]['vertno'], src[1]['vertno']]
if label.hemi == 'lh':
vertno_sel = np.intersect1d(vertno[0], label.vertices)
src_sel = np.searchsorted(vertno[0], vertno_sel)
vertno[0] = vertno_sel
vertno[1] = np.array([])
elif label.hemi == 'rh':
vertno_sel = np.intersect1d(vertno[1], label.vertices)
src_sel = np.searchsorted(vertno[1], vertno_sel) + len(vertno[0])
vertno[0] = np.array([])
vertno[1] = vertno_sel
elif label.hemi == 'both':
vertno_sel_lh = np.intersect1d(vertno[0], label.lh.vertices)
src_sel_lh = np.searchsorted(vertno[0], vertno_sel_lh)
vertno_sel_rh = np.intersect1d(vertno[1], label.rh.vertices)
src_sel_rh = np.searchsorted(vertno[1], vertno_sel_rh) + len(vertno[0])
src_sel = np.hstack((src_sel_lh, src_sel_rh))
vertno = [vertno_sel_lh, vertno_sel_rh]
else:
raise Exception("Unknown hemisphere type")
return vertno, src_sel
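# Illustrative sketch (not part of the original module): restricting a surface
# source space to a cortical label. The label and source-space file names are
# hypothetical placeholders.
def _demo_label_selection():
    import mne

    label = mne.read_label('lh.aparc.label')          # hypothetical label file
    src = read_source_spaces('sample-oct-6-src.fif')  # hypothetical src file
    vertno, src_sel = label_src_vertno_sel(label, src)
    # vertno[0]/vertno[1] hold the selected lh/rh vertex numbers;
    # src_sel indexes the corresponding rows of a gain or source matrix.
    return vertno, src_sel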
def _get_vertno(src):
return [s['vertno'] for s in src]
###############################################################################
# Write routines
@verbose
def _write_source_spaces_to_fid(fid, src, verbose=None):
"""Write the source spaces to a FIF file
Parameters
----------
fid : file descriptor
An open file descriptor.
src : list
The list of source spaces.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
"""
for s in src:
logger.info(' Write a source space...')
start_block(fid, FIFF.FIFFB_MNE_SOURCE_SPACE)
_write_one_source_space(fid, s, verbose)
end_block(fid, FIFF.FIFFB_MNE_SOURCE_SPACE)
logger.info(' [done]')
logger.info(' %d source spaces written' % len(src))
@verbose
def write_source_spaces(fname, src, verbose=None):
"""Write source spaces to a file
Parameters
----------
fname : str
The name of the file, which should end with -src.fif or
-src.fif.gz.
src : SourceSpaces
The source spaces (as returned by read_source_spaces).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
"""
check_fname(fname, 'source space', ('-src.fif', '-src.fif.gz'))
fid = start_file(fname)
start_block(fid, FIFF.FIFFB_MNE)
if src.info:
start_block(fid, FIFF.FIFFB_MNE_ENV)
write_id(fid, FIFF.FIFF_BLOCK_ID)
data = src.info.get('working_dir', None)
if data:
write_string(fid, FIFF.FIFF_MNE_ENV_WORKING_DIR, data)
data = src.info.get('command_line', None)
if data:
write_string(fid, FIFF.FIFF_MNE_ENV_COMMAND_LINE, data)
end_block(fid, FIFF.FIFFB_MNE_ENV)
_write_source_spaces_to_fid(fid, src, verbose)
end_block(fid, FIFF.FIFFB_MNE)
end_file(fid)
def _write_one_source_space(fid, this, verbose=None):
"""Write one source space"""
if this['type'] == 'surf':
src_type = FIFF.FIFFV_MNE_SPACE_SURFACE
elif this['type'] == 'vol':
src_type = FIFF.FIFFV_MNE_SPACE_VOLUME
elif this['type'] == 'discrete':
src_type = FIFF.FIFFV_MNE_SPACE_DISCRETE
else:
raise ValueError('Unknown source space type (%s)' % this['type'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_TYPE, src_type)
if this['id'] >= 0:
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_ID, this['id'])
data = this.get('subject_his_id', None)
if data:
write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, data)
write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, this['coord_frame'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, this['np'])
write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_POINTS, this['rr'])
write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS, this['nn'])
# Which vertices are active
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION, this['inuse'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE, this['nuse'])
if this['ntri'] > 0:
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NTRI, this['ntri'])
write_int_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES,
this['tris'] + 1)
if this['type'] != 'vol' and this['use_tris'] is not None:
# Use triangulation
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI, this['nuse_tri'])
write_int_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES,
this['use_tris'] + 1)
if this['type'] == 'vol':
neighbor_vert = this.get('neighbor_vert', None)
if neighbor_vert is not None:
nneighbors = np.array([len(n) for n in neighbor_vert])
neighbors = np.concatenate(neighbor_vert)
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS, nneighbors)
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS, neighbors)
write_coord_trans(fid, this['src_mri_t'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS, this['shape'])
start_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
write_coord_trans(fid, this['mri_ras_t'])
write_coord_trans(fid, this['vox_mri_t'])
mri_volume_name = this.get('mri_volume_name', None)
if mri_volume_name is not None:
write_string(fid, FIFF.FIFF_MNE_FILE_NAME, mri_volume_name)
write_float_sparse_rcs(fid, FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR,
this['interpolator'])
if 'mri_file' in this and this['mri_file'] is not None:
write_string(fid, FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE,
this['mri_file'])
write_int(fid, FIFF.FIFF_MRI_WIDTH, this['mri_width'])
write_int(fid, FIFF.FIFF_MRI_HEIGHT, this['mri_height'])
write_int(fid, FIFF.FIFF_MRI_DEPTH, this['mri_depth'])
end_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
# Patch-related information
if this['nearest'] is not None:
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST, this['nearest'])
write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST,
this['nearest_dist'])
# Distances
if this['dist'] is not None:
# Save only upper triangular portion of the matrix
dists = this['dist'].copy()
dists = sparse.triu(dists, format=dists.format)
write_float_sparse_rcs(fid, FIFF.FIFF_MNE_SOURCE_SPACE_DIST, dists)
write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT,
this['dist_limit'])
# Segmentation data
if this['type'] == 'vol' and ('seg_name' in this):
# Save the name of the segment
write_string(fid, FIFF.FIFF_COMMENT, this['seg_name'])
##############################################################################
# Surface to MNI conversion
@verbose
def vertex_to_mni(vertices, hemis, subject, subjects_dir=None, mode=None,
verbose=None):
"""Convert the array of vertices for a hemisphere to MNI coordinates
Parameters
----------
vertices : int, or list of int
Vertex number(s) to convert
hemis : int, or list of int
Hemisphere(s) the vertices belong to
subject : string
Name of the subject to load surfaces from.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
mode : string | None
Either 'nibabel' or 'freesurfer' for the software to use to
obtain the transforms. If None, 'nibabel' is tried first, falling
back to 'freesurfer' if it fails. Results should be equivalent with
either option, but nibabel may be quicker (and more pythonic).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
coordinates : n_vertices x 3 array of float
The MNI coordinates (in mm) of the vertices
Notes
-----
This function requires either nibabel (in Python) or Freesurfer
(with utility "mri_info") to be correctly installed.
"""
if not has_freesurfer() and not has_nibabel():
raise RuntimeError('NiBabel (Python) or Freesurfer (Unix) must be '
'correctly installed and accessible from Python')
if not isinstance(vertices, list) and not isinstance(vertices, np.ndarray):
vertices = [vertices]
if not isinstance(hemis, list) and not isinstance(hemis, np.ndarray):
hemis = [hemis] * len(vertices)
if not len(hemis) == len(vertices):
raise ValueError('hemi and vertices must match in length')
subjects_dir = get_subjects_dir(subjects_dir)
surfs = [op.join(subjects_dir, subject, 'surf', '%s.white' % h)
for h in ['lh', 'rh']]
# read surface locations in MRI space
rr = [read_surface(s)[0] for s in surfs]
# take point locations in MRI space and convert to MNI coordinates
xfm = _read_talxfm(subject, subjects_dir, mode)
data = np.array([rr[h][v, :] for h, v in zip(hemis, vertices)])
return apply_trans(xfm['trans'], data)
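# Illustrative sketch (not part of the original module): converting a few
# left-hemisphere vertices of one subject to MNI coordinates. The subject
# name and SUBJECTS_DIR path are hypothetical placeholders.
def _demo_vertex_to_mni():
    coords = vertex_to_mni(vertices=[0, 10, 100], hemis=0, subject='sample',
                           subjects_dir='/path/to/subjects_dir')
    # coords is an (n_vertices, 3) array of MNI coordinates in millimeters.
    return coords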
@verbose
def _read_talxfm(subject, subjects_dir, mode=None, verbose=None):
"""Read MNI transform from FreeSurfer talairach.xfm file
Adapted from freesurfer m-files. Altered to deal with Norig
and Torig correctly.
"""
if mode is not None and mode not in ['nibabel', 'freesurfer']:
raise ValueError('mode must be "nibabel" or "freesurfer"')
fname = op.join(subjects_dir, subject, 'mri', 'transforms',
'talairach.xfm')
# read the RAS to MNI transform from talairach.xfm
with open(fname, 'r') as fid:
logger.debug('Reading FreeSurfer talairach.xfm file:\n%s' % fname)
# read lines until we get the string 'Linear_Transform', which precedes
# the data transformation matrix
got_it = False
comp = 'Linear_Transform'
for line in fid:
if line[:len(comp)] == comp:
# we have the right line, so don't read any more
got_it = True
break
if got_it:
xfm = list()
# read the transformation matrix (3x4)
for ii, line in enumerate(fid):
digs = [float(s) for s in line.strip('\n;').split()]
xfm.append(digs)
if ii == 2:
break
xfm.append([0., 0., 0., 1.])
xfm = np.array(xfm, dtype=float)
else:
raise ValueError('failed to find \'Linear_Transform\' string in '
'xfm file:\n%s' % fname)
# Setup the RAS to MNI transform
ras_mni_t = {'from': FIFF.FIFFV_MNE_COORD_RAS,
'to': FIFF.FIFFV_MNE_COORD_MNI_TAL, 'trans': xfm}
# now get Norig and Torig
# (i.e. vox_ras_t and vox_mri_t, respectively)
path = op.join(subjects_dir, subject, 'mri', 'orig.mgz')
if not op.isfile(path):
path = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
if not op.isfile(path):
raise IOError('mri not found: %s' % path)
if has_nibabel():
use_nibabel = True
else:
use_nibabel = False
if mode == 'nibabel':
raise ImportError('Tried to import nibabel but failed, try using '
'mode=None or mode="freesurfer"')
# note that if mode == None, then we default to using nibabel
if use_nibabel is True and mode == 'freesurfer':
use_nibabel = False
if use_nibabel:
import nibabel as nib
img = nib.load(path)
hdr = img.get_header()
# read the MRI_VOXEL to RAS transform
n_orig = hdr.get_vox2ras()
# read the MRI_VOXEL to MRI transform
ds = np.array(hdr.get_zooms())
ns = (np.array(hdr.get_data_shape()[:3]) * ds) / 2.0
t_orig = np.array([[-ds[0], 0, 0, ns[0]],
[0, 0, ds[2], -ns[2]],
[0, -ds[1], 0, ns[1]],
[0, 0, 0, 1]], dtype=float)
nt_orig = [n_orig, t_orig]
else:
nt_orig = list()
for conv in ['--vox2ras', '--vox2ras-tkr']:
stdout, stderr = run_subprocess(['mri_info', conv, path])
stdout = np.fromstring(stdout, sep=' ').astype(float)
if not stdout.size == 16:
raise ValueError('Could not parse Freesurfer mri_info output')
nt_orig.append(stdout.reshape(4, 4))
# extract the MRI_VOXEL to RAS transform
n_orig = nt_orig[0]
vox_ras_t = {'from': FIFF.FIFFV_MNE_COORD_MRI_VOXEL,
'to': FIFF.FIFFV_MNE_COORD_RAS,
'trans': n_orig}
# extract the MRI_VOXEL to MRI transform
t_orig = nt_orig[1]
vox_mri_t = {'from': FIFF.FIFFV_MNE_COORD_MRI_VOXEL,
'to': FIFF.FIFFV_COORD_MRI,
'trans': t_orig}
# invert MRI_VOXEL to MRI to get the MRI to MRI_VOXEL transform
mri_vox_t = invert_transform(vox_mri_t)
# construct an MRI to RAS transform
mri_ras_t = combine_transforms(mri_vox_t, vox_ras_t,
FIFF.FIFFV_COORD_MRI,
FIFF.FIFFV_MNE_COORD_RAS)
# construct the MRI to MNI transform
mri_mni_t = combine_transforms(mri_ras_t, ras_mni_t,
FIFF.FIFFV_COORD_MRI,
FIFF.FIFFV_MNE_COORD_MNI_TAL)
return mri_mni_t
###############################################################################
# Creation and decimation
@verbose
def setup_source_space(subject, fname=True, spacing='oct6', surface='white',
overwrite=False, subjects_dir=None, add_dist=True,
n_jobs=1, verbose=None):
"""Setup a source space with subsampling
Parameters
----------
subject : str
Subject to process.
fname : str | None | bool
Filename to use. If True, a default name will be used. If None,
the source space will not be saved (only returned).
spacing : str
The spacing to use. Can be ``'ico#'`` for a recursively subdivided
icosahedron, ``'oct#'`` for a recursively subdivided octahedron,
or ``'all'`` for all points.
surface : str
The surface to use.
overwrite: bool
If True, overwrite output file (if it exists).
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
add_dist : bool
Add distance and patch information to the source space. This takes some
time so precomputing it is recommended.
n_jobs : int
Number of jobs to run in parallel. Will use at most 2 jobs
(one for each hemisphere).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
src : list
The source space for each hemisphere.
"""
cmd = ('setup_source_space(%s, fname=%s, spacing=%s, surface=%s, '
'overwrite=%s, subjects_dir=%s, add_dist=%s, verbose=%s)'
% (subject, fname, spacing, surface, overwrite,
subjects_dir, add_dist, verbose))
# check to make sure our parameters are good, parse 'spacing'
space_err = ('"spacing" must be a string with values '
'"ico#", "oct#", or "all", and "ico" and "oct"'
'numbers must be integers')
if not isinstance(spacing, string_types) or len(spacing) < 3:
raise ValueError(space_err)
if spacing == 'all':
stype = 'all'
sval = ''
elif spacing[:3] == 'ico':
stype = 'ico'
sval = spacing[3:]
elif spacing[:3] == 'oct':
stype = 'oct'
sval = spacing[3:]
else:
raise ValueError(space_err)
try:
if stype in ['ico', 'oct']:
sval = int(sval)
elif stype == 'spacing': # spacing
sval = float(sval)
except:
raise ValueError(space_err)
subjects_dir = get_subjects_dir(subjects_dir)
surfs = [op.join(subjects_dir, subject, 'surf', hemi + surface)
for hemi in ['lh.', 'rh.']]
bem_dir = op.join(subjects_dir, subject, 'bem')
for surf, hemi in zip(surfs, ['LH', 'RH']):
if surf is not None and not op.isfile(surf):
raise IOError('Could not find the %s surface %s'
% (hemi, surf))
if not (fname is True or fname is None or isinstance(fname, string_types)):
raise ValueError('"fname" must be a string, True, or None')
if fname is True:
extra = '%s-%s' % (stype, sval) if sval != '' else stype
fname = op.join(bem_dir, '%s-%s-src.fif' % (subject, extra))
if fname is not None and op.isfile(fname) and overwrite is False:
raise IOError('file "%s" exists, use overwrite=True if you want '
'to overwrite the file' % fname)
logger.info('Setting up the source space with the following parameters:\n')
logger.info('SUBJECTS_DIR = %s' % subjects_dir)
logger.info('Subject = %s' % subject)
logger.info('Surface = %s' % surface)
if stype == 'ico':
src_type_str = 'ico = %s' % sval
logger.info('Icosahedron subdivision grade %s\n' % sval)
elif stype == 'oct':
src_type_str = 'oct = %s' % sval
logger.info('Octahedron subdivision grade %s\n' % sval)
else:
src_type_str = 'all'
logger.info('Include all vertices\n')
# Create the fif file
if fname is not None:
logger.info('>>> 1. Creating the source space file %s...' % fname)
else:
logger.info('>>> 1. Creating the source space...\n')
# mne_make_source_space ... actually make the source spaces
src = []
# pre-load ico/oct surf (once) for speed, if necessary
if stype in ['ico', 'oct']:
# ### from mne_ico_downsample.c ###
if stype == 'ico':
logger.info('Doing the icosahedral vertex picking...')
ico_surf = _get_ico_surface(sval)
else:
logger.info('Doing the octahedral vertex picking...')
ico_surf = _tessellate_sphere_surf(sval)
else:
ico_surf = None
for hemi, surf in zip(['lh', 'rh'], surfs):
logger.info('Loading %s...' % surf)
# Setup the surface spacing in the MRI coord frame
s = _create_surf_spacing(surf, hemi, subject, stype, sval, ico_surf,
subjects_dir)
logger.info('loaded %s %d/%d selected to source space (%s)'
% (op.split(surf)[1], s['nuse'], s['np'], src_type_str))
src.append(s)
logger.info('') # newline after both subject types are run
# Fill in source space info
hemi_ids = [FIFF.FIFFV_MNE_SURF_LEFT_HEMI, FIFF.FIFFV_MNE_SURF_RIGHT_HEMI]
for s, s_id in zip(src, hemi_ids):
# Add missing fields
s.update(dict(dist=None, dist_limit=None, nearest=None, type='surf',
nearest_dist=None, pinfo=None, patch_inds=None, id=s_id,
coord_frame=np.array((FIFF.FIFFV_COORD_MRI,), np.int32)))
s['rr'] /= 1000.0
del s['tri_area']
del s['tri_cent']
del s['tri_nn']
del s['neighbor_tri']
# upconvert to object format from lists
src = SourceSpaces(src, dict(working_dir=os.getcwd(), command_line=cmd))
if add_dist:
add_source_space_distances(src, n_jobs=n_jobs, verbose=verbose)
# write out if requested, then return the data
if fname is not None:
write_source_spaces(fname, src)
logger.info('Wrote %s' % fname)
logger.info('You are now one step closer to computing the gain matrix')
return src
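# Illustrative sketch (not part of the original module): building a surface
# source space with octahedron subdivision 6 and keeping it in memory only.
# The subject name and SUBJECTS_DIR path are hypothetical placeholders.
def _demo_setup_source_space():
    src = setup_source_space('sample', fname=None, spacing='oct6',
                             surface='white', add_dist=False,
                             subjects_dir='/path/to/subjects_dir')
    # src is a SourceSpaces instance with one entry per hemisphere.
    return src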
@verbose
def setup_volume_source_space(subject, fname=None, pos=5.0, mri=None,
sphere=(0.0, 0.0, 0.0, 90.0), bem=None,
surface=None, mindist=5.0, exclude=0.0,
overwrite=False, subjects_dir=None,
volume_label=None, add_interpolator=True,
verbose=None):
"""Setup a volume source space with grid spacing or discrete source space
Parameters
----------
subject : str
Subject to process.
fname : str | None
Filename to use. If None, the source space will not be saved
(only returned).
pos : float | dict
Positions to use for sources. If float, a grid will be constructed
with the spacing given by `pos` in mm, generating a volume source
space. If dict, pos['rr'] and pos['nn'] will be used as the source
space locations (in meters) and normals, respectively, creating a
discrete source space. NOTE: For a discrete source space (`pos` is
a dict), `mri` must be None.
mri : str | None
The filename of an MRI volume (mgh or mgz) to create the
interpolation matrix over. Source estimates obtained in the
volume source space can then be morphed onto the MRI volume
using this interpolator. If pos is a dict, this can be None.
sphere : array_like (length 4)
Define spherical source space bounds using origin and radius given
by (ox, oy, oz, rad) in mm. Only used if `bem` and `surface` are
both None.
bem : str | None
Define source space bounds using a BEM file (specifically the inner
skull surface).
surface : str | dict | None
Define source space bounds using a FreeSurfer surface file. Can
also be a dictionary with entries `'rr'` and `'tris'`, such as
those returned by `read_surface()`.
mindist : float
Exclude points closer than this distance (mm) to the bounding surface.
exclude : float
Exclude points closer than this distance (mm) from the center of mass
of the bounding surface.
overwrite: bool
If True, overwrite output file (if it exists).
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
volume_label : str | None
Region of interest corresponding with freesurfer lookup table.
add_interpolator : bool
If True and ``mri`` is not None, then an interpolation matrix
will be produced.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
src : list
The source space. Note that this list will have length 1 for
compatibility reasons, as most functions expect source spaces
to be provided as lists.
Notes
-----
To create a discrete source space, `pos` must be a dict, 'mri' must be
None, and 'volume_label' must be None. To create a whole brain volume
source space, `pos` must be a float and 'mri' must be provided. To create
a volume source space from label, 'pos' must be a float, 'volume_label'
must be provided, and 'mri' must refer to a .mgh or .mgz file with values
corresponding to the freesurfer lookup-table (typically aseg.mgz).
"""
subjects_dir = get_subjects_dir(subjects_dir)
if bem is not None and surface is not None:
raise ValueError('Only one of "bem" and "surface" should be '
'specified')
if mri is not None:
if not op.isfile(mri):
raise IOError('mri file "%s" not found' % mri)
if isinstance(pos, dict):
raise ValueError('Cannot create interpolation matrix for '
'discrete source space, mri must be None if '
'pos is a dict')
elif not isinstance(pos, dict):
# "pos" will create a discrete src, so we don't need "mri"
# if "pos" is None, we must have "mri" b/c it will be vol src
raise RuntimeError('"mri" must be provided if "pos" is not a dict '
'(i.e., if a volume instead of discrete source '
'space is desired)')
if volume_label is not None:
if isinstance(pos, dict):
raise ValueError('If volume label is not none, pos must be '
'float, not a dict.')
# Check that volume label is found in .mgz file
volume_labels = get_volume_labels_from_aseg(mri)
if volume_label not in volume_labels:
raise ValueError('Volume %s not found in file %s. Double check '
'freesurfer lookup table.' % (volume_label, mri))
sphere = np.asarray(sphere)
if sphere.size != 4:
raise ValueError('"sphere" must be array_like with 4 elements')
# triage bounding argument
if bem is not None:
logger.info('BEM file : %s', bem)
elif surface is not None:
if isinstance(surface, dict):
if not all([key in surface for key in ['rr', 'tris']]):
raise KeyError('surface, if dict, must have entries "rr" '
'and "tris"')
# let's make sure we have geom info
surface = _read_surface_geom(surface, verbose=False)
surf_extra = 'dict()'
elif isinstance(surface, string_types):
if not op.isfile(surface):
raise IOError('surface file "%s" not found' % surface)
surf_extra = surface
logger.info('Boundary surface file : %s', surf_extra)
else:
logger.info('Sphere : origin at (%.1f %.1f %.1f) mm'
% (sphere[0], sphere[1], sphere[2]))
logger.info(' radius : %.1f mm' % sphere[3])
# triage pos argument
if isinstance(pos, dict):
if not all([key in pos for key in ['rr', 'nn']]):
raise KeyError('pos, if dict, must contain "rr" and "nn"')
pos_extra = 'dict()'
else: # pos should be float-like
try:
pos = float(pos)
except (TypeError, ValueError):
raise ValueError('pos must be a dict, or something that can be '
'cast to float()')
if not isinstance(pos, float):
logger.info('Source location file : %s', pos_extra)
logger.info('Assuming input in millimeters')
logger.info('Assuming input in MRI coordinates')
logger.info('Output file : %s', fname)
if isinstance(pos, float):
logger.info('grid : %.1f mm' % pos)
logger.info('mindist : %.1f mm' % mindist)
pos /= 1000.0 # convert pos from mm to m
if exclude > 0.0:
logger.info('Exclude : %.1f mm' % exclude)
if mri is not None:
logger.info('MRI volume : %s' % mri)
exclude /= 1000.0 # convert exclude from mm to m
logger.info('')
# Explicit list of points
if not isinstance(pos, float):
# Make the grid of sources
sp = _make_discrete_source_space(pos)
else:
# Load the brain surface as a template
if bem is not None:
# read bem surface in the MRI coordinate frame
surf = read_bem_surfaces(bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN,
verbose=False)
logger.info('Loaded inner skull from %s (%d nodes)'
% (bem, surf['np']))
elif surface is not None:
if isinstance(surface, string_types):
# read the surface in the MRI coordinate frame
surf = _read_surface_geom(surface)
else:
surf = surface
logger.info('Loaded bounding surface from %s (%d nodes)'
% (surface, surf['np']))
surf = deepcopy(surf)
surf['rr'] *= 1e-3 # must be converted to meters
else: # Load an icosahedron and use that as the surface
logger.info('Setting up the sphere...')
surf = _get_ico_surface(3)
# Scale and shift
# center at origin and make radius 1
_normalize_vectors(surf['rr'])
# normalize to sphere (in MRI coord frame)
surf['rr'] *= sphere[3] / 1000.0 # scale by radius
surf['rr'] += sphere[:3] / 1000.0 # move by center
_complete_surface_info(surf, True)
# Make the grid of sources in MRI space
sp = _make_volume_source_space(surf, pos, exclude, mindist, mri,
volume_label)
# Compute an interpolation matrix to show data in MRI_VOXEL coord frame
if mri is not None:
_add_interpolator(sp, mri, add_interpolator)
if 'vol_dims' in sp:
del sp['vol_dims']
# Save it
sp.update(dict(nearest=None, dist=None, use_tris=None, patch_inds=None,
dist_limit=None, pinfo=None, ntri=0, nearest_dist=None,
nuse_tri=0, tris=None))
sp = SourceSpaces([sp], dict(working_dir=os.getcwd(), command_line='None'))
if fname is not None:
write_source_spaces(fname, sp, verbose=False)
return sp
def _make_voxel_ras_trans(move, ras, voxel_size):
"""Make a transformation from MRI_VOXEL to MRI surface RAS (i.e. MRI)"""
assert voxel_size.ndim == 1
assert voxel_size.size == 3
rot = ras.T * voxel_size[np.newaxis, :]
assert rot.ndim == 2
assert rot.shape[0] == 3
assert rot.shape[1] == 3
trans = np.c_[np.r_[rot, np.zeros((1, 3))], np.r_[move, 1.0]]
t = {'from': FIFF.FIFFV_MNE_COORD_MRI_VOXEL, 'to': FIFF.FIFFV_COORD_MRI,
'trans': trans}
return t
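# Editor's illustrative sketch (not part of the original module): with an
# identity orientation matrix and an isotropic 5 mm grid, the affine built
# above is just a scaling by the voxel size plus a shift by ``move``:
#
#   move = np.array([-0.1, -0.1, -0.1])  # hypothetical grid origin in meters
#   t = _make_voxel_ras_trans(move, np.eye(3), 0.005 * np.ones(3))
#   # t['trans'] == [[0.005, 0.   , 0.   , -0.1],
#   #                [0.   , 0.005, 0.   , -0.1],
#   #                [0.   , 0.   , 0.005, -0.1],
#   #                [0.   , 0.   , 0.   ,  1. ]]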
def _make_discrete_source_space(pos):
"""Use a discrete set of source locs/oris to make src space
Parameters
----------
pos : dict
Must have entries "rr" and "nn". Data should be in meters.
Returns
-------
src : dict
The source space.
"""
# process points
rr = pos['rr'].copy()
nn = pos['nn'].copy()
    if not (rr.ndim == nn.ndim == 2 and rr.shape[0] == nn.shape[0] and
rr.shape[1] == nn.shape[1]):
raise RuntimeError('"rr" and "nn" must both be 2D arrays with '
'the same number of rows and 3 columns')
npts = rr.shape[0]
_normalize_vectors(nn)
nz = np.sum(np.sum(nn * nn, axis=1) == 0)
if nz != 0:
raise RuntimeError('%d sources have zero length normal' % nz)
logger.info('Positions (in meters) and orientations')
logger.info('%d sources' % npts)
# Ready to make the source space
coord_frame = FIFF.FIFFV_COORD_MRI
sp = dict(coord_frame=coord_frame, type='discrete', nuse=npts, np=npts,
inuse=np.ones(npts, int), vertno=np.arange(npts), rr=rr, nn=nn,
id=-1)
return sp
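# Illustrative usage (editor's sketch; ``positions`` and ``normals`` are
# hypothetical (N, 3) float arrays given in meters):
#
#   pos = dict(rr=positions, nn=normals)
#   src = _make_discrete_source_space(pos)
#   # src['np'] == src['nuse'] == positions.shape[0]
#   # src['vertno'] == np.arange(positions.shape[0])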
def _make_volume_source_space(surf, grid, exclude, mindist, mri, volume_label):
"""Make a source space which covers the volume bounded by surf"""
# Figure out the grid size in the MRI coordinate frame
mins = np.min(surf['rr'], axis=0)
maxs = np.max(surf['rr'], axis=0)
cm = np.mean(surf['rr'], axis=0) # center of mass
# Define the sphere which fits the surface
maxdist = np.sqrt(np.max(np.sum((surf['rr'] - cm) ** 2, axis=1)))
logger.info('Surface CM = (%6.1f %6.1f %6.1f) mm'
% (1000 * cm[0], 1000 * cm[1], 1000 * cm[2]))
logger.info('Surface fits inside a sphere with radius %6.1f mm'
% (1000 * maxdist))
logger.info('Surface extent:')
for c, mi, ma in zip('xyz', mins, maxs):
logger.info(' %s = %6.1f ... %6.1f mm' % (c, 1000 * mi, 1000 * ma))
maxn = np.zeros(3, int)
minn = np.zeros(3, int)
for c in range(3):
if maxs[c] > 0:
maxn[c] = np.floor(np.abs(maxs[c]) / grid) + 1
else:
maxn[c] = -np.floor(np.abs(maxs[c]) / grid) - 1
if mins[c] > 0:
minn[c] = np.floor(np.abs(mins[c]) / grid) + 1
else:
minn[c] = -np.floor(np.abs(mins[c]) / grid) - 1
logger.info('Grid extent:')
for c, mi, ma in zip('xyz', minn, maxn):
logger.info(' %s = %6.1f ... %6.1f mm'
% (c, 1000 * mi * grid, 1000 * ma * grid))
# Now make the initial grid
ns = maxn - minn + 1
npts = np.prod(ns)
nrow = ns[0]
ncol = ns[1]
nplane = nrow * ncol
sp = dict(np=npts, rr=np.zeros((npts, 3)), nn=np.zeros((npts, 3)),
inuse=np.ones(npts, int), type='vol', nuse=npts,
coord_frame=FIFF.FIFFV_COORD_MRI, id=-1, shape=ns)
sp['nn'][:, 2] = 1.0 # Source orientation is immaterial
x = np.arange(minn[0], maxn[0] + 1)[np.newaxis, np.newaxis, :]
y = np.arange(minn[1], maxn[1] + 1)[np.newaxis, :, np.newaxis]
z = np.arange(minn[2], maxn[2] + 1)[:, np.newaxis, np.newaxis]
z = np.tile(z, (1, ns[1], ns[0])).ravel()
y = np.tile(y, (ns[2], 1, ns[0])).ravel()
x = np.tile(x, (ns[2], ns[1], 1)).ravel()
k = np.arange(npts)
sp['rr'] = np.c_[x, y, z] * grid
neigh = np.empty((26, npts), int)
neigh.fill(-1)
# Figure out each neighborhood:
# 6-neighborhood first
idxs = [z > minn[2], x < maxn[0], y < maxn[1],
x > minn[0], y > minn[1], z < maxn[2]]
offsets = [-nplane, 1, nrow, -1, -nrow, nplane]
for n, idx, offset in zip(neigh[:6], idxs, offsets):
n[idx] = k[idx] + offset
# Then the rest to complete the 26-neighborhood
# First the plane below
idx1 = z > minn[2]
idx2 = np.logical_and(idx1, x < maxn[0])
neigh[6, idx2] = k[idx2] + 1 - nplane
idx3 = np.logical_and(idx2, y < maxn[1])
neigh[7, idx3] = k[idx3] + 1 + nrow - nplane
idx2 = np.logical_and(idx1, y < maxn[1])
neigh[8, idx2] = k[idx2] + nrow - nplane
idx2 = np.logical_and(idx1, x > minn[0])
idx3 = np.logical_and(idx2, y < maxn[1])
neigh[9, idx3] = k[idx3] - 1 + nrow - nplane
neigh[10, idx2] = k[idx2] - 1 - nplane
idx3 = np.logical_and(idx2, y > minn[1])
neigh[11, idx3] = k[idx3] - 1 - nrow - nplane
idx2 = np.logical_and(idx1, y > minn[1])
neigh[12, idx2] = k[idx2] - nrow - nplane
idx3 = np.logical_and(idx2, x < maxn[0])
neigh[13, idx3] = k[idx3] + 1 - nrow - nplane
# Then the same plane
idx1 = np.logical_and(x < maxn[0], y < maxn[1])
neigh[14, idx1] = k[idx1] + 1 + nrow
idx1 = x > minn[0]
idx2 = np.logical_and(idx1, y < maxn[1])
neigh[15, idx2] = k[idx2] - 1 + nrow
idx2 = np.logical_and(idx1, y > minn[1])
neigh[16, idx2] = k[idx2] - 1 - nrow
idx1 = np.logical_and(y > minn[1], x < maxn[0])
    neigh[17, idx1] = k[idx1] + 1 - nrow
# Finally one plane above
idx1 = z < maxn[2]
idx2 = np.logical_and(idx1, x < maxn[0])
neigh[18, idx2] = k[idx2] + 1 + nplane
idx3 = np.logical_and(idx2, y < maxn[1])
neigh[19, idx3] = k[idx3] + 1 + nrow + nplane
idx2 = np.logical_and(idx1, y < maxn[1])
neigh[20, idx2] = k[idx2] + nrow + nplane
idx2 = np.logical_and(idx1, x > minn[0])
idx3 = np.logical_and(idx2, y < maxn[1])
neigh[21, idx3] = k[idx3] - 1 + nrow + nplane
neigh[22, idx2] = k[idx2] - 1 + nplane
idx3 = np.logical_and(idx2, y > minn[1])
neigh[23, idx3] = k[idx3] - 1 - nrow + nplane
idx2 = np.logical_and(idx1, y > minn[1])
neigh[24, idx2] = k[idx2] - nrow + nplane
idx3 = np.logical_and(idx2, x < maxn[0])
neigh[25, idx3] = k[idx3] + 1 - nrow + nplane
logger.info('%d sources before omitting any.', sp['nuse'])
# Exclude infeasible points
dists = np.sqrt(np.sum((sp['rr'] - cm) ** 2, axis=1))
bads = np.where(np.logical_or(dists < exclude, dists > maxdist))[0]
sp['inuse'][bads] = False
sp['nuse'] -= len(bads)
logger.info('%d sources after omitting infeasible sources.', sp['nuse'])
_filter_source_spaces(surf, mindist, None, [sp])
logger.info('%d sources remaining after excluding the sources outside '
'the surface and less than %6.1f mm inside.'
% (sp['nuse'], mindist))
# Restrict sources to volume of interest
if volume_label is not None:
try:
import nibabel as nib
except ImportError:
raise ImportError("nibabel is required to read segmentation file.")
logger.info('Selecting voxels from %s' % volume_label)
# Read the segmentation data using nibabel
mgz = nib.load(mri)
mgz_data = mgz.get_data()
# Get the numeric index for this volume label
lut = _get_lut()
vol_id = _get_lut_id(lut, volume_label, True)
# Get indices for this volume label in voxel space
vox_bool = mgz_data == vol_id
# Get the 3 dimensional indices in voxel space
vox_xyz = np.array(np.where(vox_bool)).T
# Transform to RAS coordinates
# (use tkr normalization or volume won't align with surface sources)
trans = _get_mgz_header(mri)['vox2ras_tkr']
# Convert transform from mm to m
trans[:3] /= 1000.
rr_voi = apply_trans(trans, vox_xyz) # positions of VOI in RAS space
# Filter out points too far from volume region voxels
dists = _compute_nearest(rr_voi, sp['rr'], return_dists=True)[1]
# Maximum distance from center of mass of a voxel to any of its corners
maxdist = np.sqrt(((trans[:3, :3].sum(0) / 2.) ** 2).sum())
bads = np.where(dists > maxdist)[0]
# Update source info
sp['inuse'][bads] = False
sp['vertno'] = np.where(sp['inuse'] > 0)[0]
sp['nuse'] = len(sp['vertno'])
sp['seg_name'] = volume_label
sp['mri_file'] = mri
# Update log
logger.info('%d sources remaining after excluding sources too far '
'from VOI voxels', sp['nuse'])
# Omit unused vertices from the neighborhoods
logger.info('Adjusting the neighborhood info...')
# remove non source-space points
log_inuse = sp['inuse'] > 0
neigh[:, np.logical_not(log_inuse)] = -1
# remove these points from neigh
vertno = np.where(log_inuse)[0]
sp['vertno'] = vertno
old_shape = neigh.shape
neigh = neigh.ravel()
checks = np.where(neigh >= 0)[0]
removes = np.logical_not(in1d(checks, vertno))
neigh[checks[removes]] = -1
neigh.shape = old_shape
neigh = neigh.T
    # Thought we would need this, but the C code keeps -1 vertices, so we do too:
# neigh = [n[n >= 0] for n in enumerate(neigh[vertno])]
sp['neighbor_vert'] = neigh
# Set up the volume data (needed for creating the interpolation matrix)
r0 = minn * grid
voxel_size = grid * np.ones(3)
ras = np.eye(3)
sp['src_mri_t'] = _make_voxel_ras_trans(r0, ras, voxel_size)
sp['vol_dims'] = maxn - minn + 1
return sp
def _vol_vertex(width, height, jj, kk, pp):
return jj + width * kk + pp * (width * height)
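# Editor's note: _vol_vertex maps 3-D grid indices (jj, kk, pp) to the linear
# index of the raveled volume, with jj varying fastest. For a hypothetical
# grid with width=4 and height=3:
#
#   _vol_vertex(4, 3, 0, 0, 0)  # -> 0
#   _vol_vertex(4, 3, 1, 2, 1)  # -> 1 + 4 * 2 + 1 * (4 * 3) = 21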
def _get_mgz_header(fname):
"""Adapted from nibabel to quickly extract header info"""
if not fname.endswith('.mgz'):
raise IOError('Filename must end with .mgz')
header_dtd = [('version', '>i4'), ('dims', '>i4', (4,)),
('type', '>i4'), ('dof', '>i4'), ('goodRASFlag', '>i2'),
('delta', '>f4', (3,)), ('Mdc', '>f4', (3, 3)),
('Pxyz_c', '>f4', (3,))]
header_dtype = np.dtype(header_dtd)
with gzip_open(fname, 'rb') as fid:
hdr_str = fid.read(header_dtype.itemsize)
header = np.ndarray(shape=(), dtype=header_dtype,
buffer=hdr_str)
# dims
dims = header['dims'].astype(int)
dims = dims[:3] if len(dims) == 4 else dims
# vox2ras_tkr
delta = header['delta']
ds = np.array(delta, float)
ns = np.array(dims * ds) / 2.0
v2rtkr = np.array([[-ds[0], 0, 0, ns[0]],
[0, 0, ds[2], -ns[2]],
[0, -ds[1], 0, ns[1]],
[0, 0, 0, 1]], dtype=np.float32)
# ras2vox
d = np.diag(delta)
pcrs_c = dims / 2.0
Mdc = header['Mdc'].T
pxyz_0 = header['Pxyz_c'] - np.dot(Mdc, np.dot(d, pcrs_c))
M = np.eye(4, 4)
M[0:3, 0:3] = np.dot(Mdc, d)
M[0:3, 3] = pxyz_0.T
M = linalg.inv(M)
header = dict(dims=dims, vox2ras_tkr=v2rtkr, ras2vox=M)
return header
def _add_interpolator(s, mri_name, add_interpolator):
"""Compute a sparse matrix to interpolate the data into an MRI volume"""
# extract transformation information from mri
logger.info('Reading %s...' % mri_name)
header = _get_mgz_header(mri_name)
mri_width, mri_height, mri_depth = header['dims']
s.update(dict(mri_width=mri_width, mri_height=mri_height,
mri_depth=mri_depth))
trans = header['vox2ras_tkr'].copy()
trans[:3, :] /= 1000.0
s['vox_mri_t'] = {'trans': trans, 'from': FIFF.FIFFV_MNE_COORD_MRI_VOXEL,
'to': FIFF.FIFFV_COORD_MRI} # ras_tkr
trans = linalg.inv(np.dot(header['vox2ras_tkr'], header['ras2vox']))
trans[:3, 3] /= 1000.0
s['mri_ras_t'] = {'trans': trans, 'from': FIFF.FIFFV_COORD_MRI,
'to': FIFF.FIFFV_MNE_COORD_RAS} # ras
s['mri_volume_name'] = mri_name
nvox = mri_width * mri_height * mri_depth
if not add_interpolator:
s['interpolator'] = sparse.csr_matrix((nvox, s['np']))
return
_print_coord_trans(s['src_mri_t'], 'Source space : ')
_print_coord_trans(s['vox_mri_t'], 'MRI volume : ')
_print_coord_trans(s['mri_ras_t'], 'MRI volume : ')
#
# Convert MRI voxels from destination (MRI volume) to source (volume
# source space subset) coordinates
#
combo_trans = combine_transforms(s['vox_mri_t'],
invert_transform(s['src_mri_t']),
FIFF.FIFFV_MNE_COORD_MRI_VOXEL,
FIFF.FIFFV_MNE_COORD_MRI_VOXEL)
combo_trans['trans'] = combo_trans['trans'].astype(np.float32)
logger.info('Setting up interpolation...')
# Loop over slices to save (lots of) memory
# Note that it is the slowest incrementing index
# This is equivalent to using mgrid and reshaping, but faster
data = []
indices = []
indptr = np.zeros(nvox + 1, np.int32)
for p in range(mri_depth):
js = np.arange(mri_width, dtype=np.float32)
js = np.tile(js[np.newaxis, :],
(mri_height, 1)).ravel()
ks = np.arange(mri_height, dtype=np.float32)
ks = np.tile(ks[:, np.newaxis],
(1, mri_width)).ravel()
ps = np.empty((mri_height, mri_width), np.float32).ravel()
ps.fill(p)
r0 = np.c_[js, ks, ps]
del js, ks, ps
# Transform our vertices from their MRI space into our source space's
# frame (this is labeled as FIFFV_MNE_COORD_MRI_VOXEL, but it's
# really a subset of the entire volume!)
r0 = apply_trans(combo_trans['trans'], r0)
rn = np.floor(r0).astype(int)
maxs = (s['vol_dims'] - 1)[np.newaxis, :]
good = np.where(np.logical_and(np.all(rn >= 0, axis=1),
np.all(rn < maxs, axis=1)))[0]
rn = rn[good]
r0 = r0[good]
# now we take each MRI voxel *in this space*, and figure out how
# to make its value the weighted sum of voxels in the volume source
# space. This is a 3D weighting scheme based (presumably) on the
# fact that we know we're interpolating from one volumetric grid
# into another.
jj = rn[:, 0]
kk = rn[:, 1]
pp = rn[:, 2]
vss = np.empty((len(jj), 8), np.int32)
width = s['vol_dims'][0]
height = s['vol_dims'][1]
jjp1 = jj + 1
kkp1 = kk + 1
ppp1 = pp + 1
vss[:, 0] = _vol_vertex(width, height, jj, kk, pp)
vss[:, 1] = _vol_vertex(width, height, jjp1, kk, pp)
vss[:, 2] = _vol_vertex(width, height, jjp1, kkp1, pp)
vss[:, 3] = _vol_vertex(width, height, jj, kkp1, pp)
vss[:, 4] = _vol_vertex(width, height, jj, kk, ppp1)
vss[:, 5] = _vol_vertex(width, height, jjp1, kk, ppp1)
vss[:, 6] = _vol_vertex(width, height, jjp1, kkp1, ppp1)
vss[:, 7] = _vol_vertex(width, height, jj, kkp1, ppp1)
del jj, kk, pp, jjp1, kkp1, ppp1
uses = np.any(s['inuse'][vss], axis=1)
if uses.size == 0:
continue
vss = vss[uses].ravel() # vertex (col) numbers in csr matrix
indices.append(vss)
indptr[good[uses] + p * mri_height * mri_width + 1] = 8
del vss
# figure out weights for each vertex
r0 = r0[uses]
rn = rn[uses]
del uses, good
xf = r0[:, 0] - rn[:, 0].astype(np.float32)
yf = r0[:, 1] - rn[:, 1].astype(np.float32)
zf = r0[:, 2] - rn[:, 2].astype(np.float32)
omxf = 1.0 - xf
omyf = 1.0 - yf
omzf = 1.0 - zf
# each entry in the concatenation corresponds to a row of vss
data.append(np.array([omxf * omyf * omzf,
xf * omyf * omzf,
xf * yf * omzf,
omxf * yf * omzf,
omxf * omyf * zf,
xf * omyf * zf,
xf * yf * zf,
omxf * yf * zf], order='F').T.ravel())
del xf, yf, zf, omxf, omyf, omzf
# Compose the sparse matrix
indptr = np.cumsum(indptr, out=indptr)
indices = np.concatenate(indices)
data = np.concatenate(data)
s['interpolator'] = sparse.csr_matrix((data, indices, indptr),
shape=(nvox, s['np']))
logger.info(' %d/%d nonzero values [done]' % (len(data), nvox))
@verbose
def _filter_source_spaces(surf, limit, mri_head_t, src, n_jobs=1,
verbose=None):
"""Remove all source space points closer than a given limit"""
if src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD and mri_head_t is None:
raise RuntimeError('Source spaces are in head coordinates and no '
'coordinate transform was provided!')
# How close are the source points to the surface?
out_str = 'Source spaces are in '
if src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
inv_trans = invert_transform(mri_head_t)
out_str += 'head coordinates.'
elif src[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI:
out_str += 'MRI coordinates.'
else:
out_str += 'unknown (%d) coordinates.' % src[0]['coord_frame']
logger.info(out_str)
out_str = 'Checking that the sources are inside the bounding surface'
if limit > 0.0:
out_str += ' and at least %6.1f mm away' % (limit)
logger.info(out_str + ' (will take a few...)')
for s in src:
vertno = np.where(s['inuse'])[0] # can't trust s['vertno'] this deep
# Convert all points here first to save time
r1s = s['rr'][vertno]
if s['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
r1s = apply_trans(inv_trans['trans'], r1s)
# Check that the source is inside surface (often the inner skull)
x = _sum_solids_div(r1s, surf, n_jobs)
outside = np.abs(x - 1.0) > 1e-5
omit_outside = np.sum(outside)
# vectorized nearest using BallTree (or cdist)
omit = 0
if limit > 0.0:
dists = _compute_nearest(surf['rr'], r1s, return_dists=True)[1]
close = np.logical_and(dists < limit / 1000.0,
np.logical_not(outside))
omit = np.sum(close)
outside = np.logical_or(outside, close)
s['inuse'][vertno[outside]] = False
s['nuse'] -= (omit + omit_outside)
s['vertno'] = np.where(s['inuse'])[0]
if omit_outside > 0:
extras = [omit_outside]
extras += ['s', 'they are'] if omit_outside > 1 else ['', 'it is']
logger.info('%d source space point%s omitted because %s '
'outside the inner skull surface.' % tuple(extras))
if omit > 0:
extras = [omit]
            extras += ['s'] if omit > 1 else ['']
extras += [limit]
logger.info('%d source space point%s omitted because of the '
'%6.1f-mm distance limit.' % tuple(extras))
logger.info('Thank you for waiting.')
def _sum_solids_div(fros, surf, n_jobs):
"""Compute sum of solid angles according to van Oosterom for all tris"""
parallel, p_fun, _ = parallel_func(_get_solids, n_jobs)
tot_angles = parallel(p_fun(surf['rr'][tris], fros)
for tris in np.array_split(surf['tris'], n_jobs))
return np.sum(tot_angles, axis=0) / (2 * np.pi)
def _get_solids(tri_rrs, fros):
"""Helper for computing _sum_solids_div total angle in chunks"""
# NOTE: This incorporates the division by 4PI that used to be separate
tot_angle = np.zeros((len(fros)))
for tri_rr in tri_rrs:
v1 = fros - tri_rr[0]
v2 = fros - tri_rr[1]
v3 = fros - tri_rr[2]
triple = np.sum(fast_cross_3d(v1, v2) * v3, axis=1)
l1 = np.sqrt(np.sum(v1 * v1, axis=1))
l2 = np.sqrt(np.sum(v2 * v2, axis=1))
l3 = np.sqrt(np.sum(v3 * v3, axis=1))
s = (l1 * l2 * l3 +
np.sum(v1 * v2, axis=1) * l3 +
np.sum(v1 * v3, axis=1) * l2 +
np.sum(v2 * v3, axis=1) * l1)
tot_angle -= np.arctan2(triple, s)
return tot_angle
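# Editor's note: the loop above implements the van Oosterom & Strackee formula
# for the solid angle subtended by a triangle (vertices r1, r2, r3) as seen
# from a point:
#
#   tan(Omega / 2) = (r1 . (r2 x r3)) /
#                    (|r1||r2||r3| + (r1.r2)|r3| + (r1.r3)|r2| + (r2.r3)|r1|)
#
# Each arctan2 term therefore contributes half the (signed) solid angle of one
# triangle; after the division by 2 * pi in _sum_solids_div the accumulated
# total is ~1 for points enclosed by the surface and ~0 for points outside it,
# which is what the |x - 1| > 1e-5 test in _filter_source_spaces checks.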
@verbose
def add_source_space_distances(src, dist_limit=np.inf, n_jobs=1, verbose=None):
"""Compute inter-source distances along the cortical surface
This function will also try to add patch info for the source space.
It will only occur if the ``dist_limit`` is sufficiently high that all
points on the surface are within ``dist_limit`` of a point in the
source space.
Parameters
----------
src : instance of SourceSpaces
The source spaces to compute distances for.
dist_limit : float
The upper limit of distances to include (in meters).
Note: if limit < np.inf, scipy > 0.13 (bleeding edge as of
10/2013) must be installed.
n_jobs : int
Number of jobs to run in parallel. Will only use (up to) as many
cores as there are source spaces.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
src : instance of SourceSpaces
The original source spaces, with distance information added.
The distances are stored in src[n]['dist'].
Note: this function operates in-place.
Notes
-----
Requires scipy >= 0.11 (> 0.13 for `dist_limit < np.inf`).
This function can be memory- and CPU-intensive. On a high-end machine
(2012) running 6 jobs in parallel, an ico-5 (10242 per hemi) source space
takes about 10 minutes to compute all distances (`dist_limit = np.inf`).
With `dist_limit = 0.007`, computing distances takes about 1 minute.
We recommend computing distances once per source space and then saving
the source space to disk, as the computed distances will automatically be
stored along with the source space data for future use.
"""
n_jobs = check_n_jobs(n_jobs)
if not isinstance(src, SourceSpaces):
raise ValueError('"src" must be an instance of SourceSpaces')
if not np.isscalar(dist_limit):
raise ValueError('limit must be a scalar, got %s' % repr(dist_limit))
if not check_scipy_version('0.11'):
raise RuntimeError('scipy >= 0.11 must be installed (or > 0.13 '
'if dist_limit < np.inf')
if not all([s['type'] == 'surf' for s in src]):
raise RuntimeError('Currently all source spaces must be of surface '
'type')
if dist_limit < np.inf:
# can't do introspection on dijkstra function because it's Cython,
# so we'll just try quickly here
try:
sparse.csgraph.dijkstra(sparse.csr_matrix(np.zeros((2, 2))),
limit=1.0)
except TypeError:
raise RuntimeError('Cannot use "limit < np.inf" unless scipy '
'> 0.13 is installed')
parallel, p_fun, _ = parallel_func(_do_src_distances, n_jobs)
min_dists = list()
min_idxs = list()
logger.info('Calculating source space distances (limit=%s mm)...'
% (1000 * dist_limit))
for s in src:
connectivity = mesh_dist(s['tris'], s['rr'])
d = parallel(p_fun(connectivity, s['vertno'], r, dist_limit)
for r in np.array_split(np.arange(len(s['vertno'])),
n_jobs))
# deal with indexing so we can add patch info
min_idx = np.array([dd[1] for dd in d])
min_dist = np.array([dd[2] for dd in d])
midx = np.argmin(min_dist, axis=0)
range_idx = np.arange(len(s['rr']))
min_dist = min_dist[midx, range_idx]
min_idx = min_idx[midx, range_idx]
min_dists.append(min_dist)
min_idxs.append(min_idx)
# now actually deal with distances, convert to sparse representation
d = np.concatenate([dd[0] for dd in d], axis=0)
i, j = np.meshgrid(s['vertno'], s['vertno'])
d = d.ravel()
i = i.ravel()
j = j.ravel()
idx = d > 0
d = sparse.csr_matrix((d[idx], (i[idx], j[idx])),
shape=(s['np'], s['np']), dtype=np.float32)
s['dist'] = d
s['dist_limit'] = np.array([dist_limit], np.float32)
# Let's see if our distance was sufficient to allow for patch info
if not any([np.any(np.isinf(md)) for md in min_dists]):
# Patch info can be added!
for s, min_dist, min_idx in zip(src, min_dists, min_idxs):
s['nearest'] = min_idx
s['nearest_dist'] = min_dist
_add_patch_info(s)
else:
logger.info('Not adding patch information, dist_limit too small')
return src
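# Illustrative usage (editor's sketch; ``src`` would typically come from a
# source-space setup routine, and the file name below is hypothetical):
#
#   src = add_source_space_distances(src, dist_limit=0.007, n_jobs=2)
#   write_source_spaces('sample-src.fif', src)
#
# Distances are in meters, so dist_limit=0.007 restricts the computation to
# 7 mm neighborhoods, as discussed in the Notes section of the docstring.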
def _do_src_distances(con, vertno, run_inds, limit):
"""Helper to compute source space distances in chunks"""
if limit < np.inf:
func = partial(sparse.csgraph.dijkstra, limit=limit)
else:
func = sparse.csgraph.dijkstra
chunk_size = 100 # save memory by chunking (only a little slower)
lims = np.r_[np.arange(0, len(run_inds), chunk_size), len(run_inds)]
n_chunks = len(lims) - 1
d = np.empty((len(run_inds), len(vertno)))
min_dist = np.empty((n_chunks, con.shape[0]))
min_idx = np.empty((n_chunks, con.shape[0]), np.int32)
range_idx = np.arange(con.shape[0])
for li, (l1, l2) in enumerate(zip(lims[:-1], lims[1:])):
idx = vertno[run_inds[l1:l2]]
out = func(con, indices=idx)
midx = np.argmin(out, axis=0)
min_idx[li] = idx[midx]
min_dist[li] = out[midx, range_idx]
d[l1:l2] = out[:, vertno]
midx = np.argmin(min_dist, axis=0)
min_dist = min_dist[midx, range_idx]
min_idx = min_idx[midx, range_idx]
d[d == np.inf] = 0 # scipy will give us np.inf for uncalc. distances
return d, min_idx, min_dist
def get_volume_labels_from_aseg(mgz_fname):
"""Returns a list of names of segmented volumes.
Parameters
----------
mgz_fname : str
Filename to read. Typically aseg.mgz or some variant in the freesurfer
pipeline.
Returns
-------
label_names : list of str
The names of segmented volumes included in this mgz file.
"""
import nibabel as nib
# Read the mgz file using nibabel
mgz_data = nib.load(mgz_fname).get_data()
# Get the unique label names
lut = _get_lut()
label_names = [lut[lut['id'] == ii]['name'][0].decode('utf-8')
for ii in np.unique(mgz_data)]
label_names = sorted(label_names, key=lambda n: n.lower())
return label_names
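# Illustrative usage (editor's sketch; the path below is hypothetical):
#
#   labels = get_volume_labels_from_aseg('subjects/sample/mri/aseg.mgz')
#   # each entry can then be passed as ``volume_label`` when setting up a
#   # volume source space restricted to that segmented structure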
def _compare_source_spaces(src0, src1, mode='exact'):
"""Compare two source spaces
Note: this function is also used by forward/tests/test_make_forward.py
"""
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_allclose, assert_array_equal
if mode != 'exact' and 'approx' not in mode: # 'nointerp' can be appended
raise RuntimeError('unknown mode %s' % mode)
for s0, s1 in zip(src0, src1):
for name in ['nuse', 'ntri', 'np', 'type', 'id']:
assert_equal(s0[name], s1[name], name)
for name in ['subject_his_id']:
if name in s0 or name in s1:
assert_equal(s0[name], s1[name], name)
for name in ['interpolator']:
if name in s0 or name in s1:
diffs = (s0['interpolator'] - s1['interpolator']).data
if len(diffs) > 0 and 'nointerp' not in mode:
                    # allow up to 10% RMS difference
assert_true(np.sqrt(np.mean(diffs ** 2)) < 0.10, name)
for name in ['nn', 'rr', 'nuse_tri', 'coord_frame', 'tris']:
if s0[name] is None:
assert_true(s1[name] is None, name)
else:
if mode == 'exact':
assert_array_equal(s0[name], s1[name], name)
else: # 'approx' in mode
assert_allclose(s0[name], s1[name], rtol=1e-3, atol=1e-4,
err_msg=name)
for name in ['seg_name']:
if name in s0 or name in s1:
assert_equal(s0[name], s1[name], name)
if mode == 'exact':
for name in ['inuse', 'vertno', 'use_tris']:
assert_array_equal(s0[name], s1[name])
# these fields will exist if patch info was added, these are
# not tested in mode == 'approx'
for name in ['nearest', 'nearest_dist']:
if s0[name] is None:
assert_true(s1[name] is None, name)
else:
assert_array_equal(s0[name], s1[name])
for name in ['dist_limit']:
assert_true(s0[name] == s1[name], name)
for name in ['dist']:
if s0[name] is not None:
assert_equal(s1[name].shape, s0[name].shape)
assert_true(len((s0['dist'] - s1['dist']).data) == 0)
for name in ['pinfo']:
if s0[name] is not None:
assert_true(len(s0[name]) == len(s1[name]))
for p1, p2 in zip(s0[name], s1[name]):
assert_true(all(p1 == p2))
else: # 'approx' in mode:
# deal with vertno, inuse, and use_tris carefully
assert_array_equal(s0['vertno'], np.where(s0['inuse'])[0])
assert_array_equal(s1['vertno'], np.where(s1['inuse'])[0])
assert_equal(len(s0['vertno']), len(s1['vertno']))
agreement = np.mean(s0['inuse'] == s1['inuse'])
assert_true(agreement > 0.99)
if agreement < 1.0:
# make sure mismatched vertno are within 1.5mm
v0 = np.setdiff1d(s0['vertno'], s1['vertno'])
v1 = np.setdiff1d(s1['vertno'], s0['vertno'])
dists = cdist(s0['rr'][v0], s1['rr'][v1])
assert_allclose(np.min(dists, axis=1), np.zeros(len(v0)),
atol=1.5e-3)
if s0['use_tris'] is not None: # for "spacing"
assert_array_equal(s0['use_tris'].shape, s1['use_tris'].shape)
else:
assert_true(s1['use_tris'] is None)
assert_true(np.mean(s0['use_tris'] == s1['use_tris']) > 0.99)
# The above "if s0[name] is not None" can be removed once the sample
# dataset is updated to have a source space with distance info
for name in ['working_dir', 'command_line']:
if mode == 'exact':
assert_equal(src0.info[name], src1.info[name])
else: # 'approx' in mode:
if name in src0.info:
assert_true(name in src1.info, name)
else:
assert_true(name not in src1.info, name)
|
effigies/mne-python
|
mne/source_space.py
|
Python
|
bsd-3-clause
| 91,787
|
[
"Mayavi"
] |
3dfe9c324fdf724af6f7cf32e9d3215341f5e9a360de051b221afd06b3b23fa7
|
# -*- coding: utf-8 -*-
# Copyright 2007-2022 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import copy
from numba import njit
import numpy as np
import scipy.ndimage as ndi
from skimage.feature import blob_dog, blob_log, match_template, peak_local_max
from hyperspy.misc.machine_learning import import_sklearn
NO_PEAKS = np.array([[np.nan, np.nan]])
@njit(cache=True)
def _fast_mean(X): # pragma: no cover
"""JIT-compiled mean of array.
Parameters
----------
X : :py:class:`numpy.ndarray`
Input array.
Returns
-------
mean : float
Mean of X.
Notes
-----
Used by scipy.ndimage.generic_filter in the find_peaks_stat
method to reduce overhead of repeated Python function calls.
See https://github.com/scipy/scipy/issues/8916 for more details.
"""
return np.mean(X)
@njit(cache=True)
def _fast_std(X): # pragma: no cover
"""JIT-compiled standard deviation of array.
Parameters
----------
X : :py:class:`numpy.ndarray`
Input array.
Returns
-------
std : float
Standard deviation of X.
Notes
-----
Used by scipy.ndimage.generic_filter in the find_peaks_stat
method to reduce overhead of repeated Python function calls.
See https://github.com/scipy/scipy/issues/8916 for more details.
"""
return np.std(X)
def clean_peaks(peaks):
"""Sort array of peaks and deal with no peaks being found.
Parameters
----------
peaks : :py:class:`numpy.ndarray`
Array of found peaks.
Returns
-------
peaks : :py:class:`numpy.ndarray`
Sorted array, first by `peaks[:,1]` (y-coordinate) then by `peaks[:,0]`
(x-coordinate), of found peaks.
    NO_PEAKS : :py:class:`numpy.ndarray`
        Sentinel array ``[[np.nan, np.nan]]`` returned when no peaks are found.
"""
if len(peaks) == 0:
return NO_PEAKS
else:
ind = np.lexsort((peaks[:,0], peaks[:,1]))
return peaks[ind]
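# Editor's note: np.lexsort treats its *last* key as the primary key, so the
# call above sorts primarily by the y-coordinate (peaks[:, 1]) and breaks ties
# with the x-coordinate (peaks[:, 0]). For example:
#
#   clean_peaks(np.array([[3, 1], [0, 2], [5, 1]]))
#   # -> array([[3, 1], [5, 1], [0, 2]])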
def find_local_max(z, **kwargs):
"""Method to locate positive peaks in an image by local maximum searching.
This function wraps :py:func:`skimage.feature.peak_local_max` function and
sorts the results for consistency with other peak finding methods.
Parameters
----------
z : :py:class:`numpy.ndarray`
Array of image intensities.
**kwargs : dict
Keyword arguments to be passed to the ``peak_local_max`` method of
the ``scikit-image`` library. See its documentation for details
http://scikit-image.org/docs/dev/api/skimage.feature.html#peak-local-max
Returns
-------
peaks : :py:class:`numpy.ndarray` of shape (n_peaks, 2)
Peak pixel coordinates.
"""
peaks = peak_local_max(z, **kwargs)
return clean_peaks(peaks)
def find_peaks_minmax(z, distance=5., threshold=10.):
"""Method to locate the positive peaks in an image by comparing maximum
and minimum filtered images.
Parameters
----------
z : numpy.ndarray
Matrix of image intensities.
distance : float
Expected distance between peaks.
threshold : float
Minimum difference between maximum and minimum filtered images.
Returns
-------
peaks : :py:class:`numpy.ndarray` of shape (n_peaks, 2)
Peak pixel coordinates.
"""
data_max = ndi.filters.maximum_filter(z, distance)
maxima = (z == data_max)
data_min = ndi.filters.minimum_filter(z, distance)
diff = ((data_max - data_min) > threshold)
maxima[diff == 0] = 0
labeled, num_objects = ndi.label(maxima)
peaks = np.array(
ndi.center_of_mass(z, labeled, range(1, num_objects + 1)))
return clean_peaks(np.round(peaks).astype(int))
def find_peaks_max(z, alpha=3., distance=10):
"""Method to locate positive peaks in an image by local maximum searching.
Parameters
    ----------
    z : :py:class:`numpy.ndarray`
        Array of image intensities.
    alpha : float
Only maxima above `alpha * sigma` are found, where `sigma` is the
standard deviation of the image.
distance : int
When a peak is found, all pixels in a square region of side
`2 * distance` are set to zero so that no further peaks can be found
in that region.
Returns
-------
peaks : :py:class:`numpy.ndarray` of shape (n_peaks, 2)
Peak pixel coordinates.
"""
# preallocate lots of peak storage
k_arr = []
# copy image
image_temp = copy.deepcopy(z)
peak_ct = 0
# calculate standard deviation of image for thresholding
sigma = np.std(z)
while True:
k = np.argmax(image_temp)
j, i = np.unravel_index(k, image_temp.shape)
if image_temp[j, i] >= alpha * sigma:
k_arr.append([j, i])
# masks peaks already identified.
x = np.arange(i - distance, i + distance)
y = np.arange(j - distance, j + distance)
xv, yv = np.meshgrid(x, y)
# clip to handle peaks near image edge
image_temp[yv.clip(0, image_temp.shape[0] - 1),
xv.clip(0, image_temp.shape[1] - 1)] = 0
peak_ct += 1
else:
break
peaks = np.array(k_arr)
return clean_peaks(peaks)
def find_peaks_zaefferer(z, grad_threshold=0.1, window_size=40,
distance_cutoff=50.):
"""Method to locate positive peaks in an image based on gradient
thresholding and subsequent refinement within masked regions.
Parameters
----------
z : :py:class:`numpy.ndarray`
Matrix of image intensities.
grad_threshold : float
The minimum gradient required to begin a peak search.
window_size : int
The size of the square window within which a peak search is
conducted. If odd, will round down to even. The size must be larger
than 2.
distance_cutoff : float
The maximum distance a peak may be from the initial
high-gradient point.
Returns
-------
peaks : :py:class:`numpy.ndarray` of shape (n_peaks, 2)
Peak pixel coordinates.
Notes
-----
Implemented as described in Zaefferer "New developments of computer-aided
    crystallographic analysis in transmission electron microscopy" J. Appl. Cryst.
This version by Ben Martineau (2016)
"""
def box(x, y, window_size, x_max, y_max):
"""Produces a list of coordinates in the box about (x, y)."""
a = int(window_size / 2)
x_min = max(0, x - a)
x_max = min(x_max, x + a)
y_min = max(0, y - a)
y_max = min(y_max, y + a)
return np.mgrid[x_min:x_max, y_min:y_max].reshape(2, -1, order="F")
def get_max(image, box):
"""Finds the coordinates of the maximum of 'image' in 'box'."""
vals = image[tuple(box)]
ind = np.argmax(vals)
return tuple(box[:, ind])
def squared_distance(x, y):
"""Calculates the squared distance between two points."""
return (x[0] - y[0]) ** 2 + (x[1] - y[1]) ** 2
def gradient(image):
"""Calculates the square of the 2-d partial gradient.
Parameters
----------
image : :py:class:`numpy.ndarray`
The image for which the gradient will be calculated.
Returns
-------
gradient_of_image : :py:class:`numpy.ndarray`
            The summed square of the partial gradients of the image.
"""
gradient_of_image = np.gradient(image)
gradient_of_image = gradient_of_image[0] ** 2 + gradient_of_image[
1] ** 2
return gradient_of_image
# Check window size is appropriate.
if window_size < 2:
raise ValueError("`window_size` must be >= 2.")
# Generate an ordered list of matrix coordinates.
if len(z.shape) != 2:
raise ValueError("'z' should be a 2-d image matrix.")
z = z / np.max(z)
coordinates = np.indices(z.data.shape).reshape(2, -1).T
# Calculate the gradient at every point.
image_gradient = gradient(z)
# Boolean matrix of high-gradient points.
coordinates = coordinates[(image_gradient >= grad_threshold).flatten()]
# Compare against squared distance (avoids repeated sqrt calls)
distance_cutoff_sq = distance_cutoff ** 2
peaks = []
for coordinate in coordinates:
# Iterate over coordinates where the gradient is high enough.
b = box(coordinate[0], coordinate[1], window_size, z.shape[0],
z.shape[1])
p_old = (0, 0)
p_new = get_max(z, b)
while p_old[0] != p_new[0] and p_old[1] != p_new[1]:
p_old = p_new
b = box(p_old[0], p_old[1], window_size, z.shape[0], z.shape[1])
p_new = get_max(z, b)
if squared_distance(coordinate, p_new) > distance_cutoff_sq:
break
peaks.append(p_new)
peaks = np.array([p for p in set(peaks)])
return clean_peaks(peaks)
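# Illustrative usage (editor's sketch; ``image`` is a hypothetical 2-d array
# of intensities):
#
#   peaks = find_peaks_zaefferer(image, grad_threshold=0.1, window_size=40)
#   # ``peaks`` is an (n_peaks, 2) integer array of pixel coordinates, or
#   # NO_PEAKS if nothing was found.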
def find_peaks_stat(z, alpha=1.0, window_radius=10, convergence_ratio=0.05):
"""Method to locate positive peaks in an image based on statistical
refinement and difference with respect to mean intensity.
Parameters
----------
z : :py:class:`numpy.ndarray`
Array of image intensities.
alpha : float
Only maxima above `alpha * sigma` are found, where `sigma` is the
local, rolling standard deviation of the image.
window_radius : int
The pixel radius of the circular window for the calculation of the
rolling mean and standard deviation.
convergence_ratio : float
The algorithm will stop finding peaks when the proportion of new peaks
being found is less than `convergence_ratio`.
Returns
-------
peaks : :py:class:`numpy.ndarray` of shape (n_peaks, 2)
Peak pixel coordinates.
Notes
-----
Implemented as described in the PhD thesis of Thomas White, University of
Cambridge, 2009, with minor modifications to resolve ambiguities.
The algorithm is as follows:
1. Adjust the contrast and intensity bias of the image so that all pixels
have values between 0 and 1.
2. For each pixel, determine the mean and standard deviation of all pixels
inside a circle of radius 10 pixels centered on that pixel.
3. If the value of the pixel is greater than the mean of the pixels in the
circle by more than one standard deviation, set that pixel to have an
intensity of 1. Otherwise, set the intensity to 0.
    4. Smooth the image by convolving it twice with a flat 3x3 kernel.
5. Let k = (1/2 - mu)/sigma where mu and sigma are the mean and standard
deviations of all the pixel intensities in the image.
6. For each pixel in the image, if the value of the pixel is greater than
mu + k*sigma set that pixel to have an intensity of 1. Otherwise, set the
intensity to 0.
7. Detect peaks in the image by locating the centers of gravity of regions
of adjacent pixels with a value of 1.
8. Repeat #4-7 until the number of peaks found in the previous step
converges to within the user defined convergence_ratio.
"""
if not import_sklearn.sklearn_installed:
raise ImportError("This method requires scikit-learn.")
def normalize(image):
"""Scales the image to intensities between 0 and 1."""
return image / np.max(image)
def _local_stat(image, radius, func):
"""Calculates rolling method 'func' over a circular kernel."""
x, y = np.ogrid[-radius : radius + 1, -radius : radius + 1]
kernel = np.hypot(x, y) < radius
stat = ndi.filters.generic_filter(image, func, footprint=kernel)
return stat
def local_mean(image, radius):
"""Calculates rolling mean over a circular kernel."""
return _local_stat(image, radius, _fast_mean)
def local_std(image, radius):
"""Calculates rolling standard deviation over a circular kernel."""
return _local_stat(image, radius, _fast_std)
def single_pixel_desensitize(image):
"""Reduces single-pixel anomalies by nearest-neighbor smoothing."""
kernel = np.array([[0.5, 1, 0.5], [1, 1, 1], [0.5, 1, 0.5]])
smoothed_image = ndi.filters.generic_filter(image, _fast_mean, footprint=kernel)
return smoothed_image
def stat_binarise(image):
"""Peaks more than one standard deviation from the mean set to one."""
image_rolling_mean = local_mean(image, window_radius)
image_rolling_std = local_std(image, window_radius)
image = single_pixel_desensitize(image)
binarised_image = np.zeros(image.shape)
stat_mask = image > (image_rolling_mean + alpha * image_rolling_std)
binarised_image[stat_mask] = 1
return binarised_image
def smooth(image):
"""Image convolved twice using a uniform 3x3 kernel."""
image = ndi.filters.uniform_filter(image, size=3)
image = ndi.filters.uniform_filter(image, size=3)
return image
def half_binarise(image):
"""Image binarised about values of one-half intensity."""
binarised_image = np.where(image > 0.5, 1, 0)
return binarised_image
def separate_peaks(binarised_image):
"""Identify adjacent 'on' coordinates via DBSCAN."""
bi = binarised_image.astype("bool")
coordinates = np.indices(bi.shape).reshape(2, -1).T[bi.flatten()]
db = import_sklearn.sklearn.cluster.DBSCAN(2, min_samples=3)
peaks = []
if coordinates.shape[0] > 0: # we have at least some peaks
labeled_points = db.fit_predict(coordinates)
for peak_label in list(set(labeled_points)):
peaks.append(coordinates[labeled_points == peak_label])
return peaks
def _peak_find_once(image):
"""Smooth, binarise, and find peaks according to main algorithm."""
image = smooth(image) # 4
image = half_binarise(image) # 5
peaks = separate_peaks(image) # 6
centers = np.array([np.mean(peak, axis=0) for peak in peaks]) # 7
return image, centers
def stat_peak_finder(image, convergence_ratio):
"""Find peaks in image. Algorithm stages in comments."""
# Image preparation
image = normalize(image) # 1
image = stat_binarise(image) # 2, 3
# Perform first iteration of peak finding
image, peaks_curr = _peak_find_once(image) # 4-7
n_peaks = len(peaks_curr)
if n_peaks == 0:
return peaks_curr
m_peaks = 0
# Repeat peak finding with more blurring to convergence
while (n_peaks - m_peaks) / n_peaks > convergence_ratio: # 8
m_peaks = n_peaks
peaks_old = np.copy(peaks_curr)
image, peaks_curr = _peak_find_once(image)
n_peaks = len(peaks_curr)
if n_peaks == 0:
return peaks_old
return peaks_curr
return clean_peaks(stat_peak_finder(z, convergence_ratio))
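# Illustrative usage (editor's sketch; ``image`` is a hypothetical 2-d array).
# Note that scikit-learn must be installed, since DBSCAN is used to group
# adjacent above-threshold pixels into candidate peaks:
#
#   peaks = find_peaks_stat(image, alpha=1.0, window_radius=10,
#                           convergence_ratio=0.05)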
def find_peaks_dog(z, min_sigma=1., max_sigma=50., sigma_ratio=1.6,
threshold=0.2, overlap=0.5, exclude_border=False):
"""Method to locate peaks via the Difference of Gaussian Matrices method.
This function wraps :py:func:`skimage.feature.blob_dog` function and
sorts the results for consistency with other peak finding methods.
Parameters
----------
z : :py:class:`numpy.ndarray`
2-d array of intensities
min_sigma, max_sigma, sigma_ratio, threshold, overlap, exclude_border :
Additional parameters to be passed to the algorithm. See `blob_dog`
documentation for details:
http://scikit-image.org/docs/dev/api/skimage.feature.html#blob-dog
Returns
-------
peaks : :py:class:`numpy.ndarray` of shape (n_peaks, 2)
Peak pixel coordinates.
Notes
-----
While highly effective at finding even very faint peaks, this method is
sensitive to fluctuations in intensity near the edges of the image.
"""
z = z / np.max(z)
blobs = blob_dog(z, min_sigma=min_sigma, max_sigma=max_sigma,
sigma_ratio=sigma_ratio, threshold=threshold,
overlap=overlap, exclude_border=exclude_border)
try:
centers = np.round(blobs[:, :2]).astype(int)
except IndexError:
return NO_PEAKS
clean_centers = []
for center in centers:
if len(np.intersect1d(center, (0, 1) + z.shape + tuple(
c - 1 for c in z.shape))) > 0:
continue
clean_centers.append(center)
return clean_peaks(np.array(clean_centers))
def find_peaks_log(z, min_sigma=1., max_sigma=50., num_sigma=10,
threshold=0.2, overlap=0.5, log_scale=False,
exclude_border=False):
"""Method to locate peaks via the Laplacian of Gaussian Matrices method.
This function wraps :py:func:`skimage.feature.blob_log` function and
sorts the results for consistency with other peak finding methods.
Parameters
----------
z : :py:class:`numpy.ndarray`
Array of image intensities.
min_sigma, max_sigma, num_sigma, threshold, overlap, log_scale, exclude_border :
Additional parameters to be passed to the ``blob_log`` method of the
``scikit-image`` library. See its documentation for details:
http://scikit-image.org/docs/dev/api/skimage.feature.html#blob-log
Returns
-------
peaks : :py:class:`numpy.ndarray` of shape (n_peaks, 2)
Peak pixel coordinates.
"""
z = z / np.max(z)
if isinstance(num_sigma, float):
raise ValueError("`num_sigma` parameter should be an integer.")
blobs = blob_log(z, min_sigma=min_sigma, max_sigma=max_sigma,
num_sigma=num_sigma, threshold=threshold, overlap=overlap,
log_scale=log_scale, exclude_border=exclude_border)
# Attempt to return only peak positions. If no peaks exist, return an
# empty array.
try:
centers = np.round(blobs[:, :2]).astype(int)
ind = np.lexsort((centers[:,0], centers[:,1]))
except IndexError:
return NO_PEAKS
return centers[ind]
def find_peaks_xc(z, template, distance=5, threshold=0.5, **kwargs):
"""Find peaks in the cross correlation between the image and a template by
using the :py:func:`~hyperspy.utils.peakfinders2D.find_peaks_minmax` function
to find the peaks on the cross correlation result obtained using the
:py:func:`skimage.feature.match_template` function.
Parameters
----------
z : :py:class:`numpy.ndarray`
Array of image intensities.
template : numpy.ndarray (square)
Array containing a single bright disc, similar to those to detect.
distance : float
Expected distance between peaks.
threshold : float
Minimum difference between maximum and minimum filtered images.
**kwargs : dict
Keyword arguments to be passed to the
:py:func:`skimage.feature.match_template` function.
Returns
-------
peaks : :py:class:`numpy.ndarray` of shape (n_peaks, 2)
Array of peak coordinates.
"""
pad_input = kwargs.pop('pad_input', True)
response_image = match_template(z, template, pad_input=pad_input, **kwargs)
peaks = find_peaks_minmax(response_image,
distance=distance,
threshold=threshold)
return clean_peaks(peaks)
|
jat255/hyperspy
|
hyperspy/utils/peakfinders2D.py
|
Python
|
gpl-3.0
| 20,084
|
[
"Gaussian"
] |
35861865234cd3b2bc5d0ca336de7fb6a63c70140ce9cfe9d1663de0a771d22e
|
""" Class that contains client access to the transformation DB handler. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import six
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Base.Client import Client, createClient
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.TransformationSystem.Client import TransformationFilesStatus
@createClient("Transformation/TransformationManager")
class TransformationClient(Client):
"""Exposes the functionality available in the DIRAC/TransformationManagerHandler
This inherits the DIRAC base Client for direct execution of server functionality.
The following methods are available (although not visible here).
Transformation (table) manipulation
deleteTransformation(transName)
getTransformationParameters(transName,paramNames)
getTransformationWithStatus(status)
setTransformationParameter(transName,paramName,paramValue)
deleteTransformationParameter(transName,paramName)
TransformationFiles table manipulation
addFilesToTransformation(transName,lfns)
addTaskForTransformation(transName,lfns=[],se='Unknown')
getTransformationStats(transName)
TransformationTasks table manipulation
setTaskStatus(transName, taskID, status)
setTaskStatusAndWmsID(transName, taskID, status, taskWmsID)
getTransformationTaskStats(transName)
deleteTasks(transName, taskMin, taskMax)
extendTransformation( transName, nTasks)
getTasksToSubmit(transName,numTasks,site='')
TransformationLogging table manipulation
getTransformationLogging(transName)
File/directory manipulation methods (the remainder of the interface can be found below)
getFileSummary(lfns)
exists(lfns)
Web monitoring tools
getDistinctAttributeValues(attribute, selectDict)
getTransformationStatusCounters()
getTransformationSummary()
getTransformationSummaryWeb(selectDict, sortList, startItem, maxItems)
"""
def __init__(self, **kwargs):
"""Simple constructor"""
super(TransformationClient, self).__init__(**kwargs)
opsH = Operations()
self.maxResetCounter = opsH.getValue("Transformations/FilesMaxResetCounter", 10)
self.setServer("Transformation/TransformationManager")
def setServer(self, url):
self.serverURL = url
def getCounters(self, table, attrList, condDict, older=None, newer=None, timeStamp=None):
rpcClient = self._getRPC()
return rpcClient.getCounters(table, attrList, condDict, older, newer, timeStamp)
def addTransformation(
self,
transName,
description,
longDescription,
transType,
plugin,
agentType,
fileMask,
transformationGroup="General",
groupSize=1,
inheritedFrom=0,
body="",
maxTasks=0,
eventsPerTask=0,
addFiles=True,
inputMetaQuery=None,
outputMetaQuery=None,
timeout=1800,
):
"""add a new transformation"""
rpcClient = self._getRPC(timeout=timeout)
return rpcClient.addTransformation(
transName,
description,
longDescription,
transType,
plugin,
agentType,
fileMask,
transformationGroup,
groupSize,
inheritedFrom,
body,
maxTasks,
eventsPerTask,
addFiles,
inputMetaQuery,
outputMetaQuery,
)
def getTransformations(
self,
condDict=None,
older=None,
newer=None,
timeStamp=None,
orderAttribute=None,
limit=100,
extraParams=False,
columns=None,
):
"""gets all the transformations in the system, incrementally. "limit" here is just used to determine the offset."""
rpcClient = self._getRPC()
transformations = []
if condDict is None:
condDict = {}
if timeStamp is None:
timeStamp = "CreationDate"
# getting transformations - incrementally
offsetToApply = 0
while True:
res = rpcClient.getTransformations(
condDict, older, newer, timeStamp, orderAttribute, limit, extraParams, offsetToApply, columns
)
if not res["OK"]:
return res
else:
gLogger.verbose("Result for limit %d, offset %d: %d" % (limit, offsetToApply, len(res["Value"])))
if res["Value"]:
transformations = transformations + res["Value"]
offsetToApply += limit
if len(res["Value"]) < limit:
break
return S_OK(transformations)
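    # Illustrative usage (editor's sketch; the condition dictionary below is
    # hypothetical):
    #
    #   client = TransformationClient()
    #   res = client.getTransformations(condDict={"Status": "Active"})
    #   if res["OK"]:
    #       activeTransformations = res["Value"]
    #
    # The method keeps querying the service in chunks of ``limit`` rows,
    # advancing the offset each time, until fewer than ``limit`` rows come back.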
def getTransformation(self, transName, extraParams=False):
rpcClient = self._getRPC()
return rpcClient.getTransformation(transName, extraParams)
def getTransformationFiles(
self,
condDict=None,
older=None,
newer=None,
timeStamp=None,
orderAttribute=None,
limit=None,
timeout=1800,
offset=0,
maxfiles=None,
):
"""gets all the transformation files for a transformation, incrementally.
"limit" here is just used to determine the offset.
"""
rpcClient = self._getRPC(timeout=timeout)
transformationFiles = []
if condDict is None:
condDict = {}
if timeStamp is None:
timeStamp = "LastUpdate"
# getting transformationFiles - incrementally
if "LFN" in condDict:
if isinstance(condDict["LFN"], six.string_types):
lfnList = [condDict["LFN"]]
else:
lfnList = sorted(condDict["LFN"])
# If a list of LFNs is given, use chunks of 1000 only
limit = limit if limit else 1000
else:
# By default get by chunks of 10000 files
lfnList = []
limit = limit if limit else 10000
transID = condDict.get("TransformationID", "Unknown")
offsetToApply = offset
retries = 5
while True:
if lfnList:
# If list is exhausted, exit
if offsetToApply >= len(lfnList):
break
# Apply the offset to the list of LFNs
condDict["LFN"] = lfnList[offsetToApply : offsetToApply + limit]
# No limit and no offset as the list is limited already
res = rpcClient.getTransformationFiles(condDict, older, newer, timeStamp, orderAttribute, None, None)
else:
res = rpcClient.getTransformationFiles(
condDict, older, newer, timeStamp, orderAttribute, limit, offsetToApply
)
if not res["OK"]:
gLogger.error(
"Error getting files for transformation %s (offset %d), %s"
% (str(transID), offsetToApply, ("retry %d times" % retries) if retries else "give up"),
res["Message"],
)
retries -= 1
if retries:
continue
return res
else:
condDictStr = str(condDict)
log = gLogger.debug if len(condDictStr) > 100 else gLogger.verbose
if not log(
"For conditions %s: result for limit %d, offset %d: %d files"
% (condDictStr, limit, offsetToApply, len(res["Value"]))
):
gLogger.verbose(
"For condition keys %s (trans %s): result for limit %d, offset %d: %d files"
% (
str(sorted(condDict)),
condDict.get("TransformationID", "None"),
limit,
offsetToApply,
len(res["Value"]),
)
)
if res["Value"]:
transformationFiles += res["Value"]
# Limit the number of files returned
if maxfiles and len(transformationFiles) >= maxfiles:
transformationFiles = transformationFiles[:maxfiles]
break
# Less data than requested, exit only if LFNs were not given
if not lfnList and len(res["Value"]) < limit:
break
offsetToApply += limit
# Reset number of retries for next chunk
retries = 5
return S_OK(transformationFiles)
def getTransformationTasks(
self, condDict=None, older=None, newer=None, timeStamp=None, orderAttribute=None, limit=10000, inputVector=False
):
"""gets all the transformation tasks for a transformation, incrementally.
"limit" here is just used to determine the offset.
"""
rpcClient = self._getRPC()
transformationTasks = []
if condDict is None:
condDict = {}
if timeStamp is None:
timeStamp = "CreationTime"
# getting transformationFiles - incrementally
offsetToApply = 0
while True:
res = rpcClient.getTransformationTasks(
condDict, older, newer, timeStamp, orderAttribute, limit, inputVector, offsetToApply
)
if not res["OK"]:
return res
else:
gLogger.verbose("Result for limit %d, offset %d: %d" % (limit, offsetToApply, len(res["Value"])))
if res["Value"]:
transformationTasks = transformationTasks + res["Value"]
offsetToApply += limit
if len(res["Value"]) < limit:
break
return S_OK(transformationTasks)
def cleanTransformation(self, transID):
"""Clean the transformation, and set the status parameter (doing it here, for easier extensibility)"""
# Cleaning
rpcClient = self._getRPC()
res = rpcClient.cleanTransformation(transID)
if not res["OK"]:
return res
# Setting the status
return self.setTransformationParameter(transID, "Status", "TransformationCleaned")
# Add methods to handle transformation status
def startTransformation(self, transID):
"""Start the transformation"""
res = self.setTransformationParameter(transID, "Status", "Active")
if not res["OK"]:
gLogger.error("Failed to start transformation %s: %s" % (transID, res["Message"]))
return res
else:
res = self.setTransformationParameter(transID, "AgentType", "Automatic")
if not res["OK"]:
gLogger.error("Failed to set AgentType to transformation %s: %s" % (transID, res["Message"]))
return res
def stopTransformation(self, transID):
"""Stop the transformation"""
res = self.setTransformationParameter(transID, "Status", "Stopped")
if not res["OK"]:
gLogger.error("Failed to stop transformation %s: %s" % (transID, res["Message"]))
return res
else:
res = self.setTransformationParameter(transID, "AgentType", "Manual")
if not res["OK"]:
gLogger.error("Failed to set AgentType to transformation %s: %s" % (transID, res["Message"]))
return res
def moveFilesToDerivedTransformation(self, transDict, resetUnused=True):
"""move files input to a transformation, to the derived one"""
prod = transDict["TransformationID"]
parentProd = int(transDict.get("InheritedFrom", 0))
movedFiles = {}
log = gLogger.getLocalSubLogger("[None] [%d] .moveFilesToDerivedTransformation:" % prod)
if not parentProd:
log.warn("Transformation was not derived...")
return S_OK((parentProd, movedFiles))
# get the lfns in status Unused/MaxReset of the parent production
res = self.getTransformationFiles(
condDict={
"TransformationID": parentProd,
"Status": [TransformationFilesStatus.UNUSED, TransformationFilesStatus.MAX_RESET],
}
)
if not res["OK"]:
log.error(" Error getting Unused files from transformation", "%d: %s" % (parentProd, res["Message"]))
return res
parentFiles = res["Value"]
lfns = [lfnDict["LFN"] for lfnDict in parentFiles]
if not lfns:
log.info(" No files found to be moved from transformation", "%d" % parentProd)
return S_OK((parentProd, movedFiles))
# get the lfns of the derived production that were Unused/MaxReset in the parent one
res = self.getTransformationFiles(condDict={"TransformationID": prod, "LFN": lfns})
if not res["OK"]:
log.error(" Error getting files from derived transformation:", res["Message"])
return res
derivedFiles = res["Value"]
derivedStatusDict = dict((derivedDict["LFN"], derivedDict["Status"]) for derivedDict in derivedFiles)
newStatusFiles = {}
parentStatusFiles = {}
badStatusFiles = {}
for parentDict in parentFiles:
lfn = parentDict["LFN"]
derivedStatus = derivedStatusDict.get(lfn)
if derivedStatus:
parentStatus = parentDict["Status"]
# By default move to the parent status (which is Unused or MaxReset)
status = parentStatus
moveStatus = parentStatus
# For MaxReset, set Unused if requested
if parentStatus == TransformationFilesStatus.MAX_RESET:
if resetUnused:
status = TransformationFilesStatus.UNUSED
moveStatus = "Unused from MaxReset"
else:
status = "MaxReset-inherited"
if derivedStatus.endswith("-inherited"):
# This is the general case
newStatusFiles.setdefault((status, parentStatus), []).append(lfn)
movedFiles[moveStatus] = movedFiles.setdefault(moveStatus, 0) + 1
else:
badStatusFiles[derivedStatus] = badStatusFiles.setdefault(derivedStatus, 0) + 1
if parentStatus == TransformationFilesStatus.UNUSED:
# If the file was Unused, set it NotProcessed in parent
parentStatusFiles.setdefault("NotProcessed", []).append(lfn)
else:
parentStatusFiles.setdefault("Moved", []).append(lfn)
for status, count in badStatusFiles.items(): # can be an iterator
log.warn(
"Files found in an unexpected status in derived transformation",
": %d files in status %s" % (count, status),
)
# Set the status in the parent transformation first
for status, lfnList in parentStatusFiles.items(): # can be an iterator
for lfnChunk in breakListIntoChunks(lfnList, 5000):
res = self.setFileStatusForTransformation(parentProd, status, lfnChunk)
if not res["OK"]:
log.error(
" Error setting status in transformation",
"%d: status %s for %d files - %s" % (parentProd, status, len(lfnList), res["Message"]),
)
# Set the status in the new transformation
for (status, oldStatus), lfnList in newStatusFiles.items(): # can be an iterator
for lfnChunk in breakListIntoChunks(lfnList, 5000):
res = self.setFileStatusForTransformation(prod, status, lfnChunk)
if not res["OK"]:
log.debug(
" Error setting status in transformation",
"%d: status %s for %d files; resetting them %s. %s"
% (parentProd, status, len(lfnChunk), oldStatus, res["Message"]),
)
res = self.setFileStatusForTransformation(parentProd, oldStatus, lfnChunk)
if not res["OK"]:
log.error(
" Error setting status in transformation",
" %d: status %s for %d files: %s" % (parentProd, oldStatus, len(lfnChunk), res["Message"]),
)
else:
log.info(
"Successfully moved files", ": %d files from %s to %s" % (len(lfnChunk), oldStatus, status)
)
# If files were Assigned or Unused at the time of derivation, try and update them as jobs may have run since then
res = self.getTransformationFiles(
condDict={
"TransformationID": prod,
"Status": [TransformationFilesStatus.ASSIGNED_INHERITED, TransformationFilesStatus.UNUSED_INHERITED],
}
)
if res["OK"]:
assignedFiles = res["Value"]
if assignedFiles:
lfns = [lfnDict["LFN"] for lfnDict in assignedFiles]
res = self.getTransformationFiles(condDict={"TransformationID": parentProd, "LFN": lfns})
if res["OK"]:
parentFiles = res["Value"]
processedLfns = [
lfnDict["LFN"]
for lfnDict in parentFiles
if lfnDict["Status"] == TransformationFilesStatus.PROCESSED
]
if processedLfns:
res = self.setFileStatusForTransformation(
prod, TransformationFilesStatus.PROCESSED_INHERITED, processedLfns
)
if res["OK"]:
log.info(
"Successfully set files status",
": %d files to status %s"
% (len(processedLfns), TransformationFilesStatus.PROCESSED_INHERITED),
)
if not res["OK"]:
log.error("Error setting status for Assigned derived files", res["Message"])
return S_OK((parentProd, movedFiles))
def setFileStatusForTransformation(self, transName, newLFNsStatus=None, lfns=None, force=False):
"""Sets the file status for LFNs of a transformation
For backward compatibility purposes, the status and LFNs can be passed in 2 ways:
- newLFNsStatus is a dictionary with the form:
{'/this/is/an/lfn1.txt': 'StatusA', '/this/is/an/lfn2.txt': 'StatusB', ... }
and at this point lfns is not considered
- newLFNsStatus is a string that applies to all the LFNs in lfns
"""
# create dictionary in case newLFNsStatus is a string
if isinstance(newLFNsStatus, six.string_types):
if not lfns:
return S_OK({})
if isinstance(lfns, six.string_types):
lfns = [lfns]
newLFNsStatus = dict.fromkeys(lfns, newLFNsStatus)
if not newLFNsStatus:
return S_OK({})
rpcClient = self._getRPC()
# gets current status, errorCount and fileID
tsFiles = self.getTransformationFiles({"TransformationID": transName, "LFN": list(newLFNsStatus)})
if not tsFiles["OK"]:
return tsFiles
tsFiles = tsFiles["Value"]
newStatuses = {}
if tsFiles:
# for convenience, makes a small dictionary out of the tsFiles, with the lfn as key
tsFilesAsDict = dict(
(tsFile["LFN"], [tsFile["Status"], tsFile["ErrorCount"], tsFile["FileID"]]) for tsFile in tsFiles
)
# applying the state machine to the proposed status
newStatuses = self._applyTransformationFilesStateMachine(tsFilesAsDict, newLFNsStatus, force)
if newStatuses: # if there's something to update
# Key to the service is fileIDs
# The value is a tuple with the new status and a flag that says if ErrorCount should be incremented
newStatusForFileIDs = dict(
(
tsFilesAsDict[lfn][2],
[newStatuses[lfn], self._wasFileInError(newStatuses[lfn], tsFilesAsDict[lfn][0])],
)
for lfn in newStatuses
)
res = rpcClient.setFileStatusForTransformation(transName, newStatusForFileIDs)
if not res["OK"]:
return res
return S_OK(newStatuses)
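# --- Illustrative usage sketch (added commentary; not part of the upstream DIRAC
# source). The method accepts either a per-LFN status dictionary or a single status
# string plus a list of LFNs; the client instance, transformation ID and LFNs below
# are hypothetical:
#
#   tc = TransformationClient()
#   tc.setFileStatusForTransformation(1234, {'/vo/data/a.dst': 'Processed',
#                                            '/vo/data/b.dst': 'Unused'})
#   tc.setFileStatusForTransformation(1234, 'Unused', ['/vo/data/c.dst'])
#
# Either form returns S_OK with the dictionary of updates that survived the file
# state machine (see _applyTransformationFilesStateMachine below).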
def _wasFileInError(self, newStatus, currentStatus):
"""Tells whether the file was Assigned and failed, i.e. was not Processed"""
return currentStatus == TransformationFilesStatus.ASSIGNED and newStatus != TransformationFilesStatus.PROCESSED
def _applyTransformationFilesStateMachine(self, tsFilesAsDict, dictOfProposedLFNsStatus, force):
"""For easier extension, here we apply the state machine of the production files.
VOs might want to replace the standard here with something they prefer.
tsFilesAsDict is a dictionary with the LFN as key and as value a list of [Status, ErrorCount, FileID]
dictOfProposedLFNsStatus is a dictionary with the proposed status
force is a boolean
It returns a dictionary with the status updates
"""
newStatuses = {}
for lfn, newStatus in dictOfProposedLFNsStatus.items(): # can be an iterator
if lfn in tsFilesAsDict:
currentStatus = tsFilesAsDict[lfn][0]
# Apply optional corrections
if (
currentStatus == TransformationFilesStatus.PROCESSED
and newStatus != TransformationFilesStatus.PROCESSED
):
# Processed files should be in a final status unless forced
if not force:
newStatus = TransformationFilesStatus.PROCESSED
elif currentStatus == TransformationFilesStatus.MAX_RESET:
# MaxReset files can go to any status except Unused (unless forced)
if newStatus == TransformationFilesStatus.UNUSED and not force:
newStatus = TransformationFilesStatus.MAX_RESET
elif newStatus == TransformationFilesStatus.UNUSED:
errorCount = tsFilesAsDict[lfn][1]
# every 10 retries (by default) the file cannot be reset Unused any longer
if errorCount and ((errorCount % self.maxResetCounter) == 0) and not force:
newStatus = TransformationFilesStatus.MAX_RESET
# Only worth changing status if it is different
if newStatus != currentStatus:
newStatuses[lfn] = newStatus
return newStatuses
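# --- Illustrative sketch of the state machine above (added commentary; hypothetical
# LFNs and counters, assuming the default maxResetCounter of 10 and force=False):
#
#   tsFilesAsDict = {'/vo/a.dst': ['Processed', 0, 11],
#                    '/vo/b.dst': ['Assigned', 10, 12]}
#   proposed      = {'/vo/a.dst': 'Unused', '/vo/b.dst': 'Unused'}
#
# '/vo/a.dst' is silently kept Processed (a final status unless force=True), so it
# does not appear in the returned updates; '/vo/b.dst' has reached a multiple of
# maxResetCounter and is promoted to MaxReset instead of being reset to Unused.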
def setTransformationParameter(self, transID, paramName, paramValue, force=False, currentStatus=None):
"""Sets a transformation parameter. There's a special case when coming to setting the status of a transformation.
:param currentStatus: if set, make sure the status did not change in the DB before setting it
"""
rpcClient = self._getRPC()
if paramName.lower() == "status":
# get transformation Type
transformation = self.getTransformation(transID)
if not transformation["OK"]:
return transformation
transformationType = transformation["Value"]["Type"]
# get status as of today
originalStatus = self.getTransformationParameters(transID, "Status")
if not originalStatus["OK"]:
return originalStatus
originalStatus = originalStatus["Value"]
if currentStatus and currentStatus != originalStatus:
return S_ERROR("Status changed in the DB: %s" % originalStatus)
transIDAsDict = {transID: [originalStatus, transformationType]}
dictOfProposedstatus = {transID: paramValue}
# applying the state machine to the proposed status
value = self._applyTransformationStatusStateMachine(transIDAsDict, dictOfProposedstatus, force)
else:
value = paramValue
return rpcClient.setTransformationParameter(transID, paramName, value)
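# --- Illustrative usage sketch (added commentary; the transformation ID and statuses
# are hypothetical). Passing currentStatus guards against concurrent changes:
#
#   res = tc.setTransformationParameter(1234, 'Status', 'Active', currentStatus='New')
#
# If the status stored in the DB is no longer 'New', the call returns
# S_ERROR("Status changed in the DB: ...") instead of applying the transition.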
def _applyTransformationStatusStateMachine(self, transIDAsDict, dictOfProposedstatus, force):
"""For easier extension, here we apply the state machine of the transformation status.
VOs might want to replace the standard here with something they prefer.
transIDAsDict is a dictionary with the transID as key and as value a list with [Status, Type]
dictOfProposedstatus is a dictionary with the proposed status
force is a boolean
It returns the new status (the standard is just doing nothing: everything is possible)
"""
return list(dictOfProposedstatus.values())[0]
def isOK(self):
return self.valid
def addDirectory(self, path, force=False):
rpcClient = self._getRPC()
return rpcClient.addDirectory(path, force)
|
ic-hep/DIRAC
|
src/DIRAC/TransformationSystem/Client/TransformationClient.py
|
Python
|
gpl-3.0
| 25,787
|
[
"DIRAC"
] |
3873ffff7bd4da954cf2742c4707bac5c060254e0798a9d6da7170f077ebf53d
|
from sys import argv
from ase.lattice.surface import hcp0001, add_adsorbate
from ase.constraints import FixAtoms
from ase.optimize.lbfgs import LBFGS
from gpaw import GPAW, Mixer, FermiDirac
tag = 'Ru001'
adsorbate_heights = {'H': 1.0, 'N': 1.108, 'O': 1.257}
slab = hcp0001('Ru', size=(2, 2, 4), a=2.72, c=1.58*2.72, vacuum=7.0,
orthogonal=True)
slab.center(axis=2)
if len(argv) > 1:
adsorbate = argv[1]
tag = adsorbate + tag
add_adsorbate(slab, adsorbate, adsorbate_heights[adsorbate], 'hcp')
slab.set_constraint(FixAtoms(mask=slab.get_tags() >= 3))
calc = GPAW(xc='PBE',
h=0.2,
mixer=Mixer(0.1, 5, weight=100.0),
stencils=(3, 3),
occupations=FermiDirac(width=0.1),
kpts=[4, 4, 1],
eigensolver='cg',
txt=tag + '.txt')
slab.set_calculator(calc)
opt = LBFGS(slab, logfile=tag + '.log', trajectory=tag + '.traj')
opt.run(fmax=0.05)
calc.write(tag)
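# Usage sketch (added comment; not part of the original test script): run without
# arguments for the clean Ru(0001) slab, or pass one of the adsorbates listed in
# adsorbate_heights on the command line, e.g.
#
#   python ruslab.py N
#
# which places N on the hcp hollow site at a height of 1.108 Angstrom before
# relaxing the structure with LBFGS.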
|
robwarm/gpaw-symm
|
gpaw/test/big/Ru001/ruslab.py
|
Python
|
gpl-3.0
| 967
|
[
"ASE",
"GPAW"
] |
4b0904dbd957bee51186771bfdfdb2cfef6c7ab10ffdc677ff4da9bce8d296e9
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This program is part of pygimli
Visit http://www.resistivity.net for further information or the latest version.
"""
import sys
from os import system
try:
import pygimli as pg
except ImportError:
sys.stderr.write('ERROR: cannot import the library pygimli.'+ \
' Ensure that pygimli is in your PYTHONPATH')
sys.exit(1)
def main(argv):
from optparse import OptionParser
parser = OptionParser("usage: %prog [options] mesh|mod")
parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
help="be verbose", default=False)
(options, args) = parser.parse_args()
print((options, args))
if __name__ == "__main__":
main(sys.argv[1:])
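# Usage sketch (added comment; the mesh file name is only an example): the template
# parses its options and echoes them, e.g.
#
#   python app_template.py -v mymesh.bms
#
# prints the parsed (options, args) tuple with verbose=True.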
|
florian-wagner/gimli
|
python/apps/app_template.py
|
Python
|
gpl-3.0
| 777
|
[
"VisIt"
] |
487376458d0b8651364f56244e5f05d8d457b63a33b51b0064a392c802cfe3ea
|
"""
Computational Neurodynamics
Exercise 2
Simulates two layers of Izhikevich neurons. Layer 0 is stimulated
with a constant base current and layer 1 receives synaptic input
from layer 0.
(C) Murray Shanahan et al, 2015
"""
from ConnectQIF2L import ConnectQIF2L
import numpy as np
import matplotlib.pyplot as plt
N1 = 4
N2 = 4
T = 500 # Simulation time
Ib = 15 # Base current
net = ConnectQIF2L(N1, N2)
## Initialise layers
for lr in xrange(len(net.layer)):
net.layer[lr].v = -65
net.layer[lr].firings = np.array([])
v1 = np.zeros([T, N1])
v2 = np.zeros([T, N2])
## SIMULATE
for t in xrange(T):
# Deliver a constant base current to layer 0 (plotted below as Population 1)
net.layer[0].I = Ib * np.ones(N1)
net.layer[1].I = np.zeros(N2)
net.Update(t)
v1[t] = net.layer[0].v
v2[t] = net.layer[1].v
## Retrieve firings and add Dirac pulses for presentation
firings1 = net.layer[0].firings
firings2 = net.layer[1].firings
if firings1.size != 0:
v1[firings1[:, 0], firings1[:, 1]] = 30
if firings2.size != 0:
v2[firings2[:, 0], firings2[:, 1]] = 30
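# Added explanatory note: each row of a firings array is a (time step, neuron index)
# pair, so the fancy indexing above simply paints a 30 mV spike marker onto the
# corresponding membrane-potential traces before plotting.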
## Plot membrane potentials
plt.figure(1)
plt.subplot(211)
plt.plot(range(T), v1)
plt.title('Population 1 membrane potentials')
plt.ylabel('Voltage (mV)')
plt.ylim([-90, 40])
plt.subplot(212)
plt.plot(range(T), v2)
plt.title('Population 2 membrane potentials')
plt.ylabel('Voltage (mV)')
plt.ylim([-90, 40])
plt.xlabel('Time (ms)')
## Raster plots of firings
if firings1.size != 0:
plt.figure(3)
plt.subplot(211)
plt.scatter(firings1[:, 0], firings1[:, 1] + 1, marker='.')
plt.xlim(0, T)
plt.ylabel('Neuron number')
plt.ylim(0, N1+1)
plt.title('Population 1 firings')
if firings2.size != 0:
plt.subplot(212)
plt.scatter(firings2[:, 0], firings2[:, 1] + 1, marker='.')
plt.xlim(0, T)
plt.ylabel('Neuron number')
plt.ylim(0, N2+1)
plt.xlabel('Time (ms)')
plt.title('Population 2 firings')
plt.show()
|
pmediano/ComputationalNeurodynamics
|
Fall2015/Exercise_2/Solutions/RunQIF2L.py
|
Python
|
gpl-3.0
| 1,888
|
[
"DIRAC",
"NEURON"
] |
ee8a99093d5a695730fc49d32a37f47313d9835ca69209a41bd27d52de86176f
|
# -*- coding: utf-8 -*-
# enzyme - Video metadata parser
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
# Copyright 2003-2006 Dirk Meyer <dischi@freevo.org>
#
# This file is part of enzyme.
#
# enzyme is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# enzyme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with enzyme. If not, see <http://www.gnu.org/licenses/>.
import string
import re
import struct
__all__ = ['resolve']
def resolve(code):
"""
Transform a twocc or fourcc code into a name. Returns a 2-tuple of (cc,
codec) where both are strings and cc is a string in the form '0xXX' if it's
a twocc, or 'ABCD' if it's a fourcc. If the given code is not a known
twocc or fourcc, the return value will be (None, 'Unknown'), unless the
code is otherwise a printable string in which case it will be returned as
the codec.
"""
if isinstance(code, basestring):
codec = u'Unknown'
# Check for twocc
if re.match(r'^0x[\da-f]{1,4}$', code, re.I):
# Twocc in hex form
return code, TWOCC.get(int(code, 16), codec)
elif code.isdigit() and 0 <= int(code) <= 0xff:
# Twocc in decimal form
return hex(int(code)), TWOCC.get(int(code), codec)
elif len(code) == 2:
code = struct.unpack('H', code)[0]
return hex(code), TWOCC.get(code, codec)
elif len(code) != 4 and len([x for x in code if x not in string.printable]) == 0:
# Code is a printable string.
codec = unicode(code)
if code[:2] == 'MS' and code[2:].upper() in FOURCC:
code = code[2:]
if code.upper() in FOURCC:
return code.upper(), unicode(FOURCC[code.upper()])
return None, codec
elif isinstance(code, (int, long)):
return hex(code), TWOCC.get(code, u'Unknown')
return None, u'Unknown'
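# Illustrative sketch (added; not part of the original enzyme module): a tiny helper
# demonstrating typical resolve() calls. The example codes and the expected names in
# the comments are taken from the TWOCC/FOURCC tables defined below.
def _resolve_examples():
    """Return a few (code, resolved) pairs purely for demonstration."""
    return [
        ('0x55', resolve('0x55')),  # twocc in hex   -> ('0x55', 'MPEG Layer 3')
        ('DIV3', resolve('DIV3')),  # fourcc string  -> ('DIV3', u'DivX v3 MPEG-4 Low-Motion')
        ('XVID', resolve('XVID')),  # fourcc string  -> ('XVID', u'XviD MPEG-4')
    ]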
TWOCC = {
0x0000: 'Unknown Wave Format',
0x0001: 'PCM',
0x0002: 'Microsoft ADPCM',
0x0003: 'IEEE Float',
0x0004: 'Compaq Computer VSELP',
0x0005: 'IBM CVSD',
0x0006: 'A-Law',
0x0007: 'mu-Law',
0x0008: 'Microsoft DTS',
0x0009: 'Microsoft DRM',
0x0010: 'OKI ADPCM',
0x0011: 'Intel DVI/IMA ADPCM',
0x0012: 'Videologic MediaSpace ADPCM',
0x0013: 'Sierra Semiconductor ADPCM',
0x0014: 'Antex Electronics G.723 ADPCM',
0x0015: 'DSP Solutions DigiSTD',
0x0016: 'DSP Solutions DigiFIX',
0x0017: 'Dialogic OKI ADPCM',
0x0018: 'MediaVision ADPCM',
0x0019: 'Hewlett-Packard CU',
0x0020: 'Yamaha ADPCM',
0x0021: 'Speech Compression Sonarc',
0x0022: 'DSP Group TrueSpeech',
0x0023: 'Echo Speech EchoSC1',
0x0024: 'Audiofile AF36',
0x0025: 'Audio Processing Technology APTX',
0x0026: 'AudioFile AF10',
0x0027: 'Prosody 1612',
0x0028: 'LRC',
0x0030: 'Dolby AC2',
0x0031: 'Microsoft GSM 6.10',
0x0032: 'MSNAudio',
0x0033: 'Antex Electronics ADPCME',
0x0034: 'Control Resources VQLPC',
0x0035: 'DSP Solutions DigiREAL',
0x0036: 'DSP Solutions DigiADPCM',
0x0037: 'Control Resources CR10',
0x0038: 'Natural MicroSystems VBXADPCM',
0x0039: 'Crystal Semiconductor IMA ADPCM',
0x003A: 'EchoSC3',
0x003B: 'Rockwell ADPCM',
0x003C: 'Rockwell Digit LK',
0x003D: 'Xebec',
0x0040: 'Antex Electronics G.721 ADPCM',
0x0041: 'G.728 CELP',
0x0042: 'MSG723',
0x0043: 'IBM AVC ADPCM',
0x0045: 'ITU-T G.726 ADPCM',
0x0050: 'MPEG 1, Layer 1,2',
0x0052: 'RT24',
0x0053: 'PAC',
0x0055: 'MPEG Layer 3',
0x0059: 'Lucent G.723',
0x0060: 'Cirrus',
0x0061: 'ESPCM',
0x0062: 'Voxware',
0x0063: 'Canopus Atrac',
0x0064: 'G.726 ADPCM',
0x0065: 'G.722 ADPCM',
0x0066: 'DSAT',
0x0067: 'DSAT Display',
0x0069: 'Voxware Byte Aligned',
0x0070: 'Voxware AC8',
0x0071: 'Voxware AC10',
0x0072: 'Voxware AC16',
0x0073: 'Voxware AC20',
0x0074: 'Voxware MetaVoice',
0x0075: 'Voxware MetaSound',
0x0076: 'Voxware RT29HW',
0x0077: 'Voxware VR12',
0x0078: 'Voxware VR18',
0x0079: 'Voxware TQ40',
0x0080: 'Softsound',
0x0081: 'Voxware TQ60',
0x0082: 'MSRT24',
0x0083: 'G.729A',
0x0084: 'MVI MV12',
0x0085: 'DF G.726',
0x0086: 'DF GSM610',
0x0088: 'ISIAudio',
0x0089: 'Onlive',
0x0091: 'SBC24',
0x0092: 'Dolby AC3 SPDIF',
0x0093: 'MediaSonic G.723',
0x0094: 'Aculab PLC Prosody 8KBPS',
0x0097: 'ZyXEL ADPCM',
0x0098: 'Philips LPCBB',
0x0099: 'Packed',
0x00A0: 'Malden Electronics PHONYTALK',
0x00FF: 'AAC',
0x0100: 'Rhetorex ADPCM',
0x0101: 'IBM mu-law',
0x0102: 'IBM A-law',
0x0103: 'IBM AVC Adaptive Differential Pulse Code Modulation',
0x0111: 'Vivo G.723',
0x0112: 'Vivo Siren',
0x0123: 'Digital G.723',
0x0125: 'Sanyo LD ADPCM',
0x0130: 'Sipro Lab Telecom ACELP.net',
0x0131: 'Sipro Lab Telecom ACELP.4800',
0x0132: 'Sipro Lab Telecom ACELP.8V3',
0x0133: 'Sipro Lab Telecom ACELP.G.729',
0x0134: 'Sipro Lab Telecom ACELP.G.729A',
0x0135: 'Sipro Lab Telecom ACELP.KELVIN',
0x0140: 'Windows Media Video V8',
0x0150: 'Qualcomm PureVoice',
0x0151: 'Qualcomm HalfRate',
0x0155: 'Ring Zero Systems TUB GSM',
0x0160: 'Windows Media Audio V1 / DivX audio (WMA)',
0x0161: 'Windows Media Audio V7 / V8 / V9',
0x0162: 'Windows Media Audio Professional V9',
0x0163: 'Windows Media Audio Lossless V9',
0x0170: 'UNISYS NAP ADPCM',
0x0171: 'UNISYS NAP ULAW',
0x0172: 'UNISYS NAP ALAW',
0x0173: 'UNISYS NAP 16K',
0x0200: 'Creative Labs ADPCM',
0x0202: 'Creative Labs Fastspeech8',
0x0203: 'Creative Labs Fastspeech10',
0x0210: 'UHER Informatic ADPCM',
0x0215: 'Ulead DV ACM',
0x0216: 'Ulead DV ACM',
0x0220: 'Quarterdeck',
0x0230: 'I-link Worldwide ILINK VC',
0x0240: 'Aureal Semiconductor RAW SPORT',
0x0241: 'ESST AC3',
0x0250: 'Interactive Products HSX',
0x0251: 'Interactive Products RPELP',
0x0260: 'Consistent Software CS2',
0x0270: 'Sony ATRAC3 (SCX, same as MiniDisk LP2)',
0x0300: 'Fujitsu FM Towns Snd',
0x0400: 'BTV Digital',
0x0401: 'Intel Music Coder (IMC)',
0x0402: 'Ligos Indeo Audio',
0x0450: 'QDesign Music',
0x0680: 'VME VMPCM',
0x0681: 'AT&T Labs TPC',
0x0700: 'YMPEG Alpha',
0x08AE: 'ClearJump LiteWave',
0x1000: 'Olivetti GSM',
0x1001: 'Olivetti ADPCM',
0x1002: 'Olivetti CELP',
0x1003: 'Olivetti SBC',
0x1004: 'Olivetti OPR',
0x1100: 'Lernout & Hauspie LH Codec',
0x1101: 'Lernout & Hauspie CELP codec',
0x1102: 'Lernout & Hauspie SBC codec',
0x1103: 'Lernout & Hauspie SBC codec',
0x1104: 'Lernout & Hauspie SBC codec',
0x1400: 'Norris',
0x1401: 'AT&T ISIAudio',
0x1500: 'Soundspace Music Compression',
0x181C: 'VoxWare RT24 speech codec',
0x181E: 'Lucent elemedia AX24000P Music codec',
0x1C07: 'Lucent SX8300P speech codec',
0x1C0C: 'Lucent SX5363S G.723 compliant codec',
0x1F03: 'CUseeMe DigiTalk (ex-Rocwell)',
0x1FC4: 'NCT Soft ALF2CD ACM',
0x2000: 'AC3',
0x2001: 'Dolby DTS (Digital Theater System)',
0x2002: 'RealAudio 1 / 2 14.4',
0x2003: 'RealAudio 1 / 2 28.8',
0x2004: 'RealAudio G2 / 8 Cook (low bitrate)',
0x2005: 'RealAudio 3 / 4 / 5 Music (DNET)',
0x2006: 'RealAudio 10 AAC (RAAC)',
0x2007: 'RealAudio 10 AAC+ (RACP)',
0x3313: 'makeAVIS',
0x4143: 'Divio MPEG-4 AAC audio',
0x434C: 'LEAD Speech',
0x564C: 'LEAD Vorbis',
0x674F: 'Ogg Vorbis (mode 1)',
0x6750: 'Ogg Vorbis (mode 2)',
0x6751: 'Ogg Vorbis (mode 3)',
0x676F: 'Ogg Vorbis (mode 1+)',
0x6770: 'Ogg Vorbis (mode 2+)',
0x6771: 'Ogg Vorbis (mode 3+)',
0x7A21: 'GSM-AMR (CBR, no SID)',
0x7A22: 'GSM-AMR (VBR, including SID)',
0xDFAC: 'DebugMode SonicFoundry Vegas FrameServer ACM Codec',
0xF1AC: 'Free Lossless Audio Codec FLAC',
0xFFFE: 'Extensible wave format',
0xFFFF: 'development'
}
FOURCC = {
'1978': 'A.M.Paredes predictor (LossLess)',
'2VUY': 'Optibase VideoPump 8-bit 4:2:2 Component YCbCr',
'3IV0': 'MPEG4-based codec 3ivx',
'3IV1': '3ivx v1',
'3IV2': '3ivx v2',
'3IVD': 'FFmpeg DivX ;-) (MS MPEG-4 v3)',
'3IVX': 'MPEG4-based codec 3ivx',
'8BPS': 'Apple QuickTime Planar RGB with Alpha-channel',
'AAS4': 'Autodesk Animator codec (RLE)',
'AASC': 'Autodesk Animator',
'ABYR': 'Kensington ABYR',
'ACTL': 'Streambox ACT-L2',
'ADV1': 'Loronix WaveCodec',
'ADVJ': 'Avid M-JPEG Avid Technology Also known as AVRn',
'AEIK': 'Intel Indeo Video 3.2',
'AEMI': 'Array VideoONE MPEG1-I Capture',
'AFLC': 'Autodesk Animator FLC',
'AFLI': 'Autodesk Animator FLI',
'AHDV': 'CineForm 10-bit Visually Perfect HD',
'AJPG': '22fps JPEG-based codec for digital cameras',
'AMPG': 'Array VideoONE MPEG',
'ANIM': 'Intel RDX (ANIM)',
'AP41': 'AngelPotion Definitive',
'AP42': 'AngelPotion Definitive',
'ASLC': 'AlparySoft Lossless Codec',
'ASV1': 'Asus Video v1',
'ASV2': 'Asus Video v2',
'ASVX': 'Asus Video 2.0 (audio)',
'ATM4': 'Ahead Nero Digital MPEG-4 Codec',
'AUR2': 'Aura 2 Codec - YUV 4:2:2',
'AURA': 'Aura 1 Codec - YUV 4:1:1',
'AV1X': 'Avid 1:1x (Quick Time)',
'AVC1': 'H.264 AVC',
'AVD1': 'Avid DV (Quick Time)',
'AVDJ': 'Avid Meridien JFIF with Alpha-channel',
'AVDN': 'Avid DNxHD (Quick Time)',
'AVDV': 'Avid DV',
'AVI1': 'MainConcept Motion JPEG Codec',
'AVI2': 'MainConcept Motion JPEG Codec',
'AVID': 'Avid Motion JPEG',
'AVIS': 'Wrapper for AviSynth',
'AVMP': 'Avid IMX (Quick Time)',
'AVR ': 'Avid ABVB/NuVista MJPEG with Alpha-channel',
'AVRN': 'Avid Motion JPEG',
'AVUI': 'Avid Meridien Uncompressed with Alpha-channel',
'AVUP': 'Avid 10bit Packed (Quick Time)',
'AYUV': '4:4:4 YUV (AYUV)',
'AZPR': 'Quicktime Apple Video',
'AZRP': 'Quicktime Apple Video',
'BGR ': 'Uncompressed BGR32 8:8:8:8',
'BGR(15)': 'Uncompressed BGR15 5:5:5',
'BGR(16)': 'Uncompressed BGR16 5:6:5',
'BGR(24)': 'Uncompressed BGR24 8:8:8',
'BHIV': 'BeHere iVideo',
'BINK': 'RAD Game Tools Bink Video',
'BIT ': 'BI_BITFIELDS (Raw RGB)',
'BITM': 'Microsoft H.261',
'BLOX': 'Jan Jezabek BLOX MPEG Codec',
'BLZ0': 'DivX for Blizzard Decoder Filter',
'BT20': 'Conexant Prosumer Video',
'BTCV': 'Conexant Composite Video Codec',
'BTVC': 'Conexant Composite Video',
'BW00': 'BergWave (Wavelet)',
'BW10': 'Data Translation Broadway MPEG Capture',
'BXBG': 'BOXX BGR',
'BXRG': 'BOXX RGB',
'BXY2': 'BOXX 10-bit YUV',
'BXYV': 'BOXX YUV',
'CC12': 'Intel YUV12',
'CDV5': 'Canopus SD50/DVHD',
'CDVC': 'Canopus DV',
'CDVH': 'Canopus SD50/DVHD',
'CFCC': 'Digital Processing Systems DPS Perception',
'CFHD': 'CineForm 10-bit Visually Perfect HD',
'CGDI': 'Microsoft Office 97 Camcorder Video',
'CHAM': 'Winnov Caviara Champagne',
'CJPG': 'Creative WebCam JPEG',
'CLJR': 'Cirrus Logic YUV 4 pixels',
'CLLC': 'Canopus LossLess',
'CLPL': 'YV12',
'CMYK': 'Common Data Format in Printing',
'COL0': 'FFmpeg DivX ;-) (MS MPEG-4 v3)',
'COL1': 'FFmpeg DivX ;-) (MS MPEG-4 v3)',
'CPLA': 'Weitek 4:2:0 YUV Planar',
'CRAM': 'Microsoft Video 1 (CRAM)',
'CSCD': 'RenderSoft CamStudio lossless Codec',
'CTRX': 'Citrix Scalable Video Codec',
'CUVC': 'Canopus HQ',
'CVID': 'Radius Cinepak',
'CWLT': 'Microsoft Color WLT DIB',
'CYUV': 'Creative Labs YUV',
'CYUY': 'ATI YUV',
'D261': 'H.261',
'D263': 'H.263',
'DAVC': 'Dicas MPEGable H.264/MPEG-4 AVC base profile codec',
'DC25': 'MainConcept ProDV Codec',
'DCAP': 'Pinnacle DV25 Codec',
'DCL1': 'Data Connection Conferencing Codec',
'DCT0': 'WniWni Codec',
'DFSC': 'DebugMode FrameServer VFW Codec',
'DIB ': 'Full Frames (Uncompressed)',
'DIV1': 'FFmpeg-4 V1 (hacked MS MPEG-4 V1)',
'DIV2': 'MS MPEG-4 V2',
'DIV3': 'DivX v3 MPEG-4 Low-Motion',
'DIV4': 'DivX v3 MPEG-4 Fast-Motion',
'DIV5': 'DIV5',
'DIV6': 'DivX MPEG-4',
'DIVX': 'DivX',
'DM4V': 'Dicas MPEGable MPEG-4',
'DMB1': 'Matrox Rainbow Runner hardware MJPEG',
'DMB2': 'Paradigm MJPEG',
'DMK2': 'ViewSonic V36 PDA Video',
'DP02': 'DynaPel MPEG-4',
'DPS0': 'DPS Reality Motion JPEG',
'DPSC': 'DPS PAR Motion JPEG',
'DRWX': 'Pinnacle DV25 Codec',
'DSVD': 'DSVD',
'DTMT': 'Media-100 Codec',
'DTNT': 'Media-100 Codec',
'DUCK': 'Duck True Motion 1.0',
'DV10': 'BlueFish444 (lossless RGBA, YUV 10-bit)',
'DV25': 'Matrox DVCPRO codec',
'DV50': 'Matrox DVCPRO50 codec',
'DVAN': 'DVAN',
'DVC ': 'Apple QuickTime DV (DVCPRO NTSC)',
'DVCP': 'Apple QuickTime DV (DVCPRO PAL)',
'DVCS': 'MainConcept DV Codec',
'DVE2': 'InSoft DVE-2 Videoconferencing',
'DVH1': 'Pinnacle DVHD100',
'DVHD': 'DV 1125 lines at 30.00 Hz or 1250 lines at 25.00 Hz',
'DVIS': 'VSYNC DualMoon Iris DV codec',
'DVL ': 'Radius SoftDV 16:9 NTSC',
'DVLP': 'Radius SoftDV 16:9 PAL',
'DVMA': 'Darim Vision DVMPEG',
'DVOR': 'BlueFish444 (lossless RGBA, YUV 10-bit)',
'DVPN': 'Apple QuickTime DV (DV NTSC)',
'DVPP': 'Apple QuickTime DV (DV PAL)',
'DVR1': 'TARGA2000 Codec',
'DVRS': 'VSYNC DualMoon Iris DV codec',
'DVSD': 'DV',
'DVSL': 'DV compressed in SD (SDL)',
'DVX1': 'DVX1000SP Video Decoder',
'DVX2': 'DVX2000S Video Decoder',
'DVX3': 'DVX3000S Video Decoder',
'DX50': 'DivX v5',
'DXGM': 'Electronic Arts Game Video codec',
'DXSB': 'DivX Subtitles Codec',
'DXT1': 'Microsoft DirectX Compressed Texture (DXT1)',
'DXT2': 'Microsoft DirectX Compressed Texture (DXT2)',
'DXT3': 'Microsoft DirectX Compressed Texture (DXT3)',
'DXT4': 'Microsoft DirectX Compressed Texture (DXT4)',
'DXT5': 'Microsoft DirectX Compressed Texture (DXT5)',
'DXTC': 'Microsoft DirectX Compressed Texture (DXTC)',
'DXTN': 'Microsoft DirectX Compressed Texture (DXTn)',
'EKQ0': 'Elsa EKQ0',
'ELK0': 'Elsa ELK0',
'EM2V': 'Etymonix MPEG-2 I-frame',
'EQK0': 'Elsa graphics card quick codec',
'ESCP': 'Eidos Escape',
'ETV1': 'eTreppid Video ETV1',
'ETV2': 'eTreppid Video ETV2',
'ETVC': 'eTreppid Video ETVC',
'FFDS': 'FFDShow supported',
'FFV1': 'FFDShow supported',
'FFVH': 'FFVH codec',
'FLIC': 'Autodesk FLI/FLC Animation',
'FLJP': 'D-Vision Field Encoded Motion JPEG',
'FLV1': 'FLV1 codec',
'FMJP': 'D-Vision fieldbased ISO MJPEG',
'FRLE': 'SoftLab-NSK Y16 + Alpha RLE',
'FRWA': 'SoftLab-Nsk Forward Motion JPEG w/ alpha channel',
'FRWD': 'SoftLab-Nsk Forward Motion JPEG',
'FRWT': 'SoftLab-NSK Vision Forward Motion JPEG with Alpha-channel',
'FRWU': 'SoftLab-NSK Vision Forward Uncompressed',
'FVF1': 'Iterated Systems Fractal Video Frame',
'FVFW': 'ff MPEG-4 based on XviD codec',
'GEPJ': 'White Pine (ex Paradigm Matrix) Motion JPEG Codec',
'GJPG': 'Grand Tech GT891x Codec',
'GLCC': 'GigaLink AV Capture codec',
'GLZW': 'Motion LZW',
'GPEG': 'Motion JPEG',
'GPJM': 'Pinnacle ReelTime MJPEG Codec',
'GREY': 'Apparently a duplicate of Y800',
'GWLT': 'Microsoft Greyscale WLT DIB',
'H260': 'H.260',
'H261': 'H.261',
'H262': 'H.262',
'H263': 'H.263',
'H264': 'H.264 AVC',
'H265': 'H.265',
'H266': 'H.266',
'H267': 'H.267',
'H268': 'H.268',
'H269': 'H.269',
'HD10': 'BlueFish444 (lossless RGBA, YUV 10-bit)',
'HDX4': 'Jomigo HDX4',
'HFYU': 'Huffman Lossless Codec',
'HMCR': 'Rendition Motion Compensation Format (HMCR)',
'HMRR': 'Rendition Motion Compensation Format (HMRR)',
'I263': 'Intel ITU H.263 Videoconferencing (i263)',
'I420': 'Intel Indeo 4',
'IAN ': 'Intel RDX',
'ICLB': 'InSoft CellB Videoconferencing',
'IDM0': 'IDM Motion Wavelets 2.0',
'IF09': 'Microsoft H.261',
'IGOR': 'Power DVD',
'IJPG': 'Intergraph JPEG',
'ILVC': 'Intel Layered Video',
'ILVR': 'ITU-T H.263+',
'IMC1': 'IMC1',
'IMC2': 'IMC2',
'IMC3': 'IMC3',
'IMC4': 'IMC4',
'IMJG': 'Accom SphereOUS MJPEG with Alpha-channel',
'IPDV': 'I-O Data Device Giga AVI DV Codec',
'IPJ2': 'Image Power JPEG2000',
'IR21': 'Intel Indeo 2.1',
'IRAW': 'Intel YUV Uncompressed',
'IUYV': 'Interlaced version of UYVY (line order 0,2,4 then 1,3,5 etc)',
'IV30': 'Ligos Indeo 3.0',
'IV31': 'Ligos Indeo 3.1',
'IV32': 'Ligos Indeo 3.2',
'IV33': 'Ligos Indeo 3.3',
'IV34': 'Ligos Indeo 3.4',
'IV35': 'Ligos Indeo 3.5',
'IV36': 'Ligos Indeo 3.6',
'IV37': 'Ligos Indeo 3.7',
'IV38': 'Ligos Indeo 3.8',
'IV39': 'Ligos Indeo 3.9',
'IV40': 'Ligos Indeo Interactive 4.0',
'IV41': 'Ligos Indeo Interactive 4.1',
'IV42': 'Ligos Indeo Interactive 4.2',
'IV43': 'Ligos Indeo Interactive 4.3',
'IV44': 'Ligos Indeo Interactive 4.4',
'IV45': 'Ligos Indeo Interactive 4.5',
'IV46': 'Ligos Indeo Interactive 4.6',
'IV47': 'Ligos Indeo Interactive 4.7',
'IV48': 'Ligos Indeo Interactive 4.8',
'IV49': 'Ligos Indeo Interactive 4.9',
'IV50': 'Ligos Indeo Interactive 5.0',
'IY41': 'Interlaced version of Y41P (line order 0,2,4,...,1,3,5...)',
'IYU1': '12 bit format used in mode 2 of the IEEE 1394 Digital Camera 1.04 spec',
'IYU2': '24 bit format used in mode 2 of the IEEE 1394 Digital Camera 1.04 spec',
'IYUV': 'Intel Indeo iYUV 4:2:0',
'JBYR': 'Kensington JBYR',
'JFIF': 'Motion JPEG (FFmpeg)',
'JPEG': 'Still Image JPEG DIB',
'JPG ': 'JPEG compressed',
'JPGL': 'Webcam JPEG Light',
'KMVC': 'Karl Morton\'s Video Codec',
'KPCD': 'Kodak Photo CD',
'L261': 'Lead Technologies H.261',
'L263': 'Lead Technologies H.263',
'LAGS': 'Lagarith LossLess',
'LBYR': 'Creative WebCam codec',
'LCMW': 'Lead Technologies Motion CMW Codec',
'LCW2': 'LEADTools MCMW (Motion Wavelet)',
'LEAD': 'LEAD Video Codec',
'LGRY': 'Lead Technologies Grayscale Image',
'LJ2K': 'LEADTools JPEG2000',
'LJPG': 'LEAD MJPEG Codec',
'LMP2': 'LEADTools MPEG2',
'LOCO': 'LOCO Lossless Codec',
'LSCR': 'LEAD Screen Capture',
'LSVM': 'Vianet Lighting Strike Vmail (Streaming)',
'LZO1': 'LZO compressed (lossless codec)',
'M261': 'Microsoft H.261',
'M263': 'Microsoft H.263',
'M4CC': 'ESS MPEG4 Divio codec',
'M4S2': 'Microsoft MPEG-4 (M4S2)',
'MC12': 'ATI Motion Compensation Format (MC12)',
'MC24': 'MainConcept Motion JPEG Codec',
'MCAM': 'ATI Motion Compensation Format (MCAM)',
'MCZM': 'Theory MicroCosm Lossless 64bit RGB with Alpha-channel',
'MDVD': 'Alex MicroDVD Video (hacked MS MPEG-4)',
'MDVF': 'Pinnacle DV/DV50/DVHD100',
'MHFY': 'A.M.Paredes mhuffyYUV (LossLess)',
'MJ2C': 'Morgan Multimedia Motion JPEG2000',
'MJPA': 'Pinnacle ReelTime MJPG hardware codec',
'MJPB': 'Motion JPEG codec',
'MJPG': 'Motion JPEG DIB',
'MJPX': 'Pegasus PICVideo Motion JPEG',
'MMES': 'Matrox MPEG-2 I-frame',
'MNVD': 'MindBend MindVid LossLess',
'MP2A': 'MPEG-2 Audio',
'MP2T': 'MPEG-2 Transport Stream',
'MP2V': 'MPEG-2 Video',
'MP41': 'Microsoft MPEG-4 V1 (enhanced H263)',
'MP42': 'Microsoft MPEG-4 (low-motion)',
'MP43': 'Microsoft MPEG-4 (fast-motion)',
'MP4A': 'MPEG-4 Audio',
'MP4S': 'Microsoft MPEG-4 (MP4S)',
'MP4T': 'MPEG-4 Transport Stream',
'MP4V': 'Apple QuickTime MPEG-4 native',
'MPEG': 'MPEG-1',
'MPG1': 'FFmpeg-1',
'MPG2': 'FFmpeg-1',
'MPG3': 'Same as Low motion DivX MPEG-4',
'MPG4': 'Microsoft MPEG-4 Video High Speed Compressor',
'MPGI': 'Sigma Designs MPEG',
'MPNG': 'Motion PNG codec',
'MRCA': 'Martin Regen Codec',
'MRLE': 'Run Length Encoding',
'MSS1': 'Windows Screen Video',
'MSS2': 'Windows Media 9',
'MSUC': 'MSU LossLess',
'MSVC': 'Microsoft Video 1',
'MSZH': 'Lossless codec (ZIP compression)',
'MTGA': 'Motion TGA images (24, 32 bpp)',
'MTX1': 'Matrox MTX1',
'MTX2': 'Matrox MTX2',
'MTX3': 'Matrox MTX3',
'MTX4': 'Matrox MTX4',
'MTX5': 'Matrox MTX5',
'MTX6': 'Matrox MTX6',
'MTX7': 'Matrox MTX7',
'MTX8': 'Matrox MTX8',
'MTX9': 'Matrox MTX9',
'MV12': 'MV12',
'MVI1': 'Motion Pixels MVI',
'MVI2': 'Motion Pixels MVI',
'MWV1': 'Aware Motion Wavelets',
'MYUV': 'Media-100 844/X Uncompressed',
'NAVI': 'nAVI',
'NDIG': 'Ahead Nero Digital MPEG-4 Codec',
'NHVU': 'NVidia Texture Format (GEForce 3)',
'NO16': 'Theory None16 64bit uncompressed RAW',
'NT00': 'NewTek LightWave HDTV YUV with Alpha-channel',
'NTN1': 'Nogatech Video Compression 1',
'NTN2': 'Nogatech Video Compression 2 (GrabBee hardware coder)',
'NUV1': 'NuppelVideo',
'NV12': '8-bit Y plane followed by an interleaved U/V plane with 2x2 subsampling',
'NV21': 'As NV12 with U and V reversed in the interleaved plane',
'NVDS': 'nVidia Texture Format',
'NVHS': 'NVidia Texture Format (GEForce 3)',
'NVS0': 'nVidia GeForce Texture',
'NVS1': 'nVidia GeForce Texture',
'NVS2': 'nVidia GeForce Texture',
'NVS3': 'nVidia GeForce Texture',
'NVS4': 'nVidia GeForce Texture',
'NVS5': 'nVidia GeForce Texture',
'NVT0': 'nVidia GeForce Texture',
'NVT1': 'nVidia GeForce Texture',
'NVT2': 'nVidia GeForce Texture',
'NVT3': 'nVidia GeForce Texture',
'NVT4': 'nVidia GeForce Texture',
'NVT5': 'nVidia GeForce Texture',
'PDVC': 'I-O Data Device Digital Video Capture DV codec',
'PGVV': 'Radius Video Vision',
'PHMO': 'IBM Photomotion',
'PIM1': 'Pegasus Imaging',
'PIM2': 'Pegasus Imaging',
'PIMJ': 'Pegasus Imaging Lossless JPEG',
'PIXL': 'MiroVideo XL (Motion JPEG)',
'PNG ': 'Apple PNG',
'PNG1': 'Corecodec.org CorePNG Codec',
'PVEZ': 'Horizons Technology PowerEZ',
'PVMM': 'PacketVideo Corporation MPEG-4',
'PVW2': 'Pegasus Imaging Wavelet Compression',
'PVWV': 'Pegasus Imaging Wavelet 2000',
'PXLT': 'Apple Pixlet (Wavelet)',
'Q1.0': 'Q-Team QPEG 1.0 (www.q-team.de)',
'Q1.1': 'Q-Team QPEG 1.1 (www.q-team.de)',
'QDGX': 'Apple QuickDraw GX',
'QPEG': 'Q-Team QPEG 1.0',
'QPEQ': 'Q-Team QPEG 1.1',
'R210': 'BlackMagic YUV (Quick Time)',
'R411': 'Radius DV NTSC YUV',
'R420': 'Radius DV PAL YUV',
'RAVI': 'GroupTRON ReferenceAVI codec (dummy for MPEG compressor)',
'RAV_': 'GroupTRON ReferenceAVI codec (dummy for MPEG compressor)',
'RAW ': 'Full Frames (Uncompressed)',
'RGB ': 'Full Frames (Uncompressed)',
'RGB(15)': 'Uncompressed RGB15 5:5:5',
'RGB(16)': 'Uncompressed RGB16 5:6:5',
'RGB(24)': 'Uncompressed RGB24 8:8:8',
'RGB1': 'Uncompressed RGB332 3:3:2',
'RGBA': 'Raw RGB with alpha',
'RGBO': 'Uncompressed RGB555 5:5:5',
'RGBP': 'Uncompressed RGB565 5:6:5',
'RGBQ': 'Uncompressed RGB555X 5:5:5 BE',
'RGBR': 'Uncompressed RGB565X 5:6:5 BE',
'RGBT': 'Computer Concepts 32-bit support',
'RL4 ': 'RLE 4bpp RGB',
'RL8 ': 'RLE 8bpp RGB',
'RLE ': 'Microsoft Run Length Encoder',
'RLE4': 'Run Length Encoded 4',
'RLE8': 'Run Length Encoded 8',
'RMP4': 'REALmagic MPEG-4 Video Codec',
'ROQV': 'Id RoQ File Video Decoder',
'RPZA': 'Apple Video 16 bit "road pizza"',
'RT21': 'Intel Real Time Video 2.1',
'RTV0': 'NewTek VideoToaster',
'RUD0': 'Rududu video codec',
'RV10': 'RealVideo codec',
'RV13': 'RealVideo codec',
'RV20': 'RealVideo G2',
'RV30': 'RealVideo 8',
'RV40': 'RealVideo 9',
'RVX ': 'Intel RDX (RVX )',
'S263': 'Sorenson Vision H.263',
'S422': 'Tekram VideoCap C210 YUV 4:2:2',
'SAMR': 'Adaptive Multi-Rate (AMR) audio codec',
'SAN3': 'MPEG-4 codec (direct copy of DivX 3.11a)',
'SDCC': 'Sun Communication Digital Camera Codec',
'SEDG': 'Samsung MPEG-4 codec',
'SFMC': 'CrystalNet Surface Fitting Method',
'SHR0': 'BitJazz SheerVideo',
'SHR1': 'BitJazz SheerVideo',
'SHR2': 'BitJazz SheerVideo',
'SHR3': 'BitJazz SheerVideo',
'SHR4': 'BitJazz SheerVideo',
'SHR5': 'BitJazz SheerVideo',
'SHR6': 'BitJazz SheerVideo',
'SHR7': 'BitJazz SheerVideo',
'SJPG': 'CUseeMe Networks Codec',
'SL25': 'SoftLab-NSK DVCPRO',
'SL50': 'SoftLab-NSK DVCPRO50',
'SLDV': 'SoftLab-NSK Forward DV Draw codec',
'SLIF': 'SoftLab-NSK MPEG2 I-frames',
'SLMJ': 'SoftLab-NSK Forward MJPEG',
'SMC ': 'Apple Graphics (SMC) codec (256 color)',
'SMSC': 'Radius SMSC',
'SMSD': 'Radius SMSD',
'SMSV': 'WorldConnect Wavelet Video',
'SNOW': 'SNOW codec',
'SP40': 'SunPlus YUV',
'SP44': 'SunPlus Aiptek MegaCam Codec',
'SP53': 'SunPlus Aiptek MegaCam Codec',
'SP54': 'SunPlus Aiptek MegaCam Codec',
'SP55': 'SunPlus Aiptek MegaCam Codec',
'SP56': 'SunPlus Aiptek MegaCam Codec',
'SP57': 'SunPlus Aiptek MegaCam Codec',
'SP58': 'SunPlus Aiptek MegaCam Codec',
'SPIG': 'Radius Spigot',
'SPLC': 'Splash Studios ACM Audio Codec',
'SPRK': 'Sorenson Spark',
'SQZ2': 'Microsoft VXTreme Video Codec V2',
'STVA': 'ST CMOS Imager Data (Bayer)',
'STVB': 'ST CMOS Imager Data (Nudged Bayer)',
'STVC': 'ST CMOS Imager Data (Bunched)',
'STVX': 'ST CMOS Imager Data (Extended CODEC Data Format)',
'STVY': 'ST CMOS Imager Data (Extended CODEC Data Format with Correction Data)',
'SV10': 'Sorenson Video R1',
'SVQ1': 'Sorenson Video R3',
'SVQ3': 'Sorenson Video 3 (Apple Quicktime 5)',
'SWC1': 'MainConcept Motion JPEG Codec',
'T420': 'Toshiba YUV 4:2:0',
'TGA ': 'Apple TGA (with Alpha-channel)',
'THEO': 'FFVFW Supported Codec',
'TIFF': 'Apple TIFF (with Alpha-channel)',
'TIM2': 'Pinnacle RAL DVI',
'TLMS': 'TeraLogic Motion Intraframe Codec (TLMS)',
'TLST': 'TeraLogic Motion Intraframe Codec (TLST)',
'TM20': 'Duck TrueMotion 2.0',
'TM2A': 'Duck TrueMotion Archiver 2.0',
'TM2X': 'Duck TrueMotion 2X',
'TMIC': 'TeraLogic Motion Intraframe Codec (TMIC)',
'TMOT': 'Horizons Technology TrueMotion S',
'TR20': 'Duck TrueMotion RealTime 2.0',
'TRLE': 'Akula Alpha Pro Custom AVI (LossLess)',
'TSCC': 'TechSmith Screen Capture Codec',
'TV10': 'Tecomac Low-Bit Rate Codec',
'TVJP': 'TrueVision Field Encoded Motion JPEG',
'TVMJ': 'Truevision TARGA MJPEG Hardware Codec',
'TY0N': 'Trident TY0N',
'TY2C': 'Trident TY2C',
'TY2N': 'Trident TY2N',
'U263': 'UB Video StreamForce H.263',
'U<Y ': 'Discreet UC YUV 4:2:2:4 10 bit',
'U<YA': 'Discreet UC YUV 4:2:2:4 10 bit (with Alpha-channel)',
'UCOD': 'eMajix.com ClearVideo',
'ULTI': 'IBM Ultimotion',
'UMP4': 'UB Video MPEG 4',
'UYNV': 'UYVY',
'UYVP': 'YCbCr 4:2:2',
'UYVU': 'SoftLab-NSK Forward YUV codec',
'UYVY': 'UYVY 4:2:2 byte ordering',
'V210': 'Optibase VideoPump 10-bit 4:2:2 Component YCbCr',
'V261': 'Lucent VX2000S',
'V422': '24 bit YUV 4:2:2 Format',
'V655': '16 bit YUV 4:2:2 Format',
'VBLE': 'MarcFD VBLE Lossless Codec',
'VCR1': 'ATI VCR 1.0',
'VCR2': 'ATI VCR 2.0',
'VCR3': 'ATI VCR 3.0',
'VCR4': 'ATI VCR 4.0',
'VCR5': 'ATI VCR 5.0',
'VCR6': 'ATI VCR 6.0',
'VCR7': 'ATI VCR 7.0',
'VCR8': 'ATI VCR 8.0',
'VCR9': 'ATI VCR 9.0',
'VDCT': 'Video Maker Pro DIB',
'VDOM': 'VDOnet VDOWave',
'VDOW': 'VDOnet VDOLive (H.263)',
'VDST': 'VirtualDub remote frameclient ICM driver',
'VDTZ': 'Darim Vison VideoTizer YUV',
'VGPX': 'VGPixel Codec',
'VIDM': 'DivX 5.0 Pro Supported Codec',
'VIDS': 'YUV 4:2:2 CCIR 601 for V422',
'VIFP': 'VIFP',
'VIV1': 'Vivo H.263',
'VIV2': 'Vivo H.263',
'VIVO': 'Vivo H.263 v2.00',
'VIXL': 'Miro Video XL',
'VLV1': 'Videologic VLCAP.DRV',
'VP30': 'On2 VP3.0',
'VP31': 'On2 VP3.1',
'VP40': 'On2 TrueCast VP4',
'VP50': 'On2 TrueCast VP5',
'VP60': 'On2 TrueCast VP6',
'VP61': 'On2 TrueCast VP6.1',
'VP62': 'On2 TrueCast VP6.2',
'VP70': 'On2 TrueMotion VP7',
'VQC1': 'Vector-quantised codec 1',
'VQC2': 'Vector-quantised codec 2',
'VR21': 'BlackMagic YUV (Quick Time)',
'VSSH': 'Vanguard VSS H.264',
'VSSV': 'Vanguard Software Solutions Video Codec',
'VSSW': 'Vanguard VSS H.264',
'VTLP': 'Alaris VideoGramPixel Codec',
'VX1K': 'VX1000S Video Codec',
'VX2K': 'VX2000S Video Codec',
'VXSP': 'VX1000SP Video Codec',
'VYU9': 'ATI Technologies YUV',
'VYUY': 'ATI Packed YUV Data',
'WBVC': 'Winbond W9960',
'WHAM': 'Microsoft Video 1 (WHAM)',
'WINX': 'Winnov Software Compression',
'WJPG': 'AverMedia Winbond JPEG',
'WMV1': 'Windows Media Video V7',
'WMV2': 'Windows Media Video V8',
'WMV3': 'Windows Media Video V9',
'WMVA': 'WMVA codec',
'WMVP': 'Windows Media Video V9',
'WNIX': 'WniWni Codec',
'WNV1': 'Winnov Hardware Compression',
'WNVA': 'Winnov hw compress',
'WRLE': 'Apple QuickTime BMP Codec',
'WRPR': 'VideoTools VideoServer Client Codec',
'WV1F': 'WV1F codec',
'WVLT': 'IllusionHope Wavelet 9/7',
'WVP2': 'WVP2 codec',
'X263': 'Xirlink H.263',
'X264': 'XiWave GNU GPL x264 MPEG-4 Codec',
'XLV0': 'NetXL Video Decoder',
'XMPG': 'Xing MPEG (I-Frame only)',
'XVID': 'XviD MPEG-4',
'XVIX': 'Based on XviD MPEG-4 codec',
'XWV0': 'XiWave Video Codec',
'XWV1': 'XiWave Video Codec',
'XWV2': 'XiWave Video Codec',
'XWV3': 'XiWave Video Codec (Xi-3 Video)',
'XWV4': 'XiWave Video Codec',
'XWV5': 'XiWave Video Codec',
'XWV6': 'XiWave Video Codec',
'XWV7': 'XiWave Video Codec',
'XWV8': 'XiWave Video Codec',
'XWV9': 'XiWave Video Codec',
'XXAN': 'XXAN',
'XYZP': 'Extended PAL format XYZ palette',
'Y211': 'YUV 2:1:1 Packed',
'Y216': 'Pinnacle TARGA CineWave YUV (Quick Time)',
'Y411': 'YUV 4:1:1 Packed',
'Y41B': 'YUV 4:1:1 Planar',
'Y41P': 'PC1 4:1:1',
'Y41T': 'PC1 4:1:1 with transparency',
'Y422': 'Y422',
'Y42B': 'YUV 4:2:2 Planar',
'Y42T': 'PCI 4:2:2 with transparency',
'Y444': 'IYU2',
'Y8 ': 'Grayscale video',
'Y800': 'Simple grayscale video',
'YC12': 'Intel YUV12 Codec',
'YMPG': 'YMPEG Alpha',
'YU12': 'ATI YV12 4:2:0 Planar',
'YU92': 'Intel - YUV',
'YUNV': 'YUNV',
'YUV2': 'Apple Component Video (YUV 4:2:2)',
'YUV8': 'Winnov Caviar YUV8',
'YUV9': 'Intel YUV9',
'YUVP': 'YCbCr 4:2:2',
'YUY2': 'Uncompressed YUV 4:2:2',
'YUYV': 'Canopus YUV',
'YV12': 'YVU12 Planar',
'YV16': 'Elecard YUV 4:2:2 Planar',
'YV92': 'Intel Smart Video Recorder YVU9',
'YVU9': 'Intel YVU9 Planar',
'YVYU': 'YVYU 4:2:2 byte ordering',
'ZLIB': 'ZLIB',
'ZPEG': 'Metheus Video Zipper',
'ZYGO': 'ZyGo Video Codec'
}
# Make the lookup fool proof: also register upper-cased keys and keys with trailing spaces stripped.
for code, value in FOURCC.items():
if not code.upper() in FOURCC:
FOURCC[code.upper()] = value
if code.endswith(' '):
FOURCC[code.strip().upper()] = value
|
Branlala/docker-sickbeardfr
|
sickbeard/lib/enzyme/fourcc.py
|
Python
|
mit
| 31,535
|
[
"CRYSTAL"
] |
d88b7dc0a42f79ffdbc58fb0223a5c80bfff854c492f18cabd18140a5afff22d
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
from preprocessing import preprocess_utility as ult
from datasets.imagenet_dataset import ImagenetData
# from datasets import imagenet_dataset
from models import mobilenet_model
from models import vgg_model
FLAGS = tf.app.flags.FLAGS
IMAGE_SIZE = ult.IMAGE_SIZE
NUM_CLASSES = ult.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = ult.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = ult.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 1.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.75 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
TOWER_NAME = 'tower'
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measure the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
var = tf.get_variable(name, shape, initializer=initializer)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
var = _variable_on_cpu(name, shape,
tf.truncated_normal_initializer(stddev=stddev))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
# data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
dataset_train = ImagenetData(subset='train')
dataset_test = ImagenetData(subset='validation')
assert dataset_train.data_files()
assert dataset_test.data_files()
imgs_train, labels_train = ult.distorted_inputs(
dataset_train,
isTrain=True,
batch_size=FLAGS.batch_size,
num_preprocess_threads=FLAGS.num_preprocess_threads)
imgs_test, labels_test = ult.inputs(
dataset_test,
batch_size=FLAGS.batch_size,
num_preprocess_threads=FLAGS.num_preprocess_threads)
return (imgs_train, labels_train, imgs_test, labels_test)
# return ult.distorted_inputs(
# dataset,
# isTrain,
# batch_size=FLAGS.batch_size,
# num_preprocess_threads=FLAGS.num_preprocess_threads)
def inference(images, isTrain, isLoad):
"""Build the vggnet model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
if FLAGS.model_name == 'vggnet':
model = vgg_model.vggnet(isLoad, isTrain)
elif FLAGS.model_name == 'mobilenet':
model = mobilenet_model.mobilenet(isLoad, isTrain)
keep_prob = tf.cond(isTrain, lambda: 0.5, lambda: 1.0)
pred = model.conv_network(images, keep_prob)
return pred
def eval(logits, labels):
labels = tf.cast(labels, tf.int64)
predictions = tf.argmax(logits, 1)
# top5_acc = tf.metrics.recall_at_k(
# labels = labels,
# predictions = logits,
# k = 5
# )
# acc = tf.metrics.accuracy(
# labels = labels,
# predictions = predictions
# )
acc = tf.reduce_mean(tf.cast(tf.equal(predictions, labels), tf.float32))
top5_acc = tf.reduce_mean(tf.cast(tf.nn.in_top_k(logits, labels, 5), tf.float32))
return (acc, top5_acc)
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
# labels = tf.cast(labels, tf.float32)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
return tf.add_n(tf.get_collection('losses'), name='total_loss')
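# --- Illustrative sketch (added commentary; not part of the original training code).
# loss() returns the batch cross-entropy plus every L2 term that
# _variable_with_weight_decay() pushed onto the 'losses' collection, so a rough
# (hypothetical) training step built on these helpers would look like:
#
#   logits = inference(images, isTrain=tf.constant(True), isLoad=False)
#   total_loss = loss(logits, labels)
#   train_op = tf.train.GradientDescentOptimizer(INITIAL_LEARNING_RATE).minimize(total_loss)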
def pickle_save(sess):
if FLAGS.model_name == 'vggnet':
vgg_model.save_model(sess)
elif FLAGS.model_name == 'mobilenet':
mobilenet_model.save_model(sess)
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
|
Aaron-Zhao123/nn_library
|
model_wrapper.py
|
Python
|
mit
| 6,816
|
[
"Gaussian"
] |
0de6ac1fc70f38e8d34ce1fb4c03af741022855060ac07892218b0c38a305aa8
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2018 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Custom astroid checker for set_trace calls."""
from pylint.interfaces import IAstroidChecker
from pylint.checkers import BaseChecker, utils
class SetTraceChecker(BaseChecker):
"""Custom astroid checker for set_trace calls."""
__implements__ = IAstroidChecker
name = 'settrace'
msgs = {
'E9101': ('set_trace call found', 'set-trace', None),
}
priority = -1
@utils.check_messages('set-trace')
def visit_call(self, node):
"""Visit a Call node."""
if hasattr(node, 'func'):
infer = utils.safe_infer(node.func)
if infer:
if getattr(node.func, 'name', None) == 'set_trace':
self.add_message('set-trace', node=node)
def register(linter):
"""Register this checker."""
linter.register_checker(SetTraceChecker(linter))
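# Illustrative note (added; not part of the upstream qutebrowser source): once this
# checker is registered, pylint reports E9101 ("set_trace call found") for calls whose
# function *name* is set_trace, i.e. code such as
#
#   from pdb import set_trace
#   set_trace()
#
# Calls spelled as pdb.set_trace() produce an Attribute node without a .name
# attribute, so they are not matched by the getattr(node.func, 'name', None) check.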
|
airodactyl/qutebrowser
|
scripts/dev/pylint_checkers/qute_pylint/settrace.py
|
Python
|
gpl-3.0
| 1,643
|
[
"VisIt"
] |
49202125de6910d3172af6aa16d137cb4351cbed0fe5e0537239b63101b807ea
|
"""
nflgame is an API to retrieve and read NFL Game Center JSON data.
It can work with real-time data, which can be used for fantasy football.
nflgame works by parsing the same JSON data that powers NFL.com's live
GameCenter. Therefore, nflgame can be used to report game statistics while
a game is being played.
The package comes pre-loaded with game data from every pre- and regular
season game from 2009 up until the present (I try to update it every week).
Therefore, querying such data does not actually ping NFL.com.
However, if you try to search for data in a game that is being currently
played, the JSON data will be downloaded from NFL.com at each request (so be
careful not to inspect for data too many times while a game is being played).
If you ask for data for a particular game that hasn't been cached to disk
but is no longer being played, it will be automatically cached to disk
so that no further downloads are required.
Here's a quick teaser to find the top 5 running backs by rushing yards in the
first week of the 2013 season:
#!python
import nflgame
games = nflgame.games(2013, week=1)
players = nflgame.combine_game_stats(games)
for p in players.rushing().sort('rushing_yds').limit(5):
msg = '%s %d carries for %d yards and %d TDs'
print msg % (p, p.rushing_att, p.rushing_yds, p.rushing_tds)
And the output is:
L.McCoy 31 carries for 184 yards and 1 TDs
T.Pryor 13 carries for 112 yards and 0 TDs
S.Vereen 14 carries for 101 yards and 0 TDs
A.Peterson 18 carries for 93 yards and 2 TDs
R.Bush 21 carries for 90 yards and 0 TDs
Or you could find the top 5 passing plays in the same time period:
#!python
import nflgame
games = nflgame.games(2013, week=1)
plays = nflgame.combine_plays(games)
for p in plays.sort('passing_yds').limit(5):
print p
And the output is:
(DEN, DEN 22, Q4, 3 and 8) (4:42) (Shotgun) P.Manning pass
short left to D.Thomas for 78 yards, TOUCHDOWN. Penalty on
BAL-E.Dumervil, Defensive Offside, declined.
(DET, DET 23, Q3, 3 and 7) (5:58) (Shotgun) M.Stafford pass short
middle to R.Bush for 77 yards, TOUCHDOWN.
(NYG, NYG 30, Q2, 1 and 10) (2:01) (No Huddle, Shotgun) E.Manning
pass deep left to V.Cruz for 70 yards, TOUCHDOWN. Pass complete on
a fly pattern.
(NO, NO 24, Q2, 2 and 6) (5:11) (Shotgun) D.Brees pass deep left to
K.Stills to ATL 9 for 67 yards (R.McClain; R.Alford). Pass 24, YAC
43
(NYG, NYG 20, Q1, 1 and 10) (13:04) E.Manning pass short middle
to H.Nicks pushed ob at DAL 23 for 57 yards (M.Claiborne). Pass
complete on a slant pattern.
If you aren't a programmer, then the
[tutorial for non programmers](http://goo.gl/y05fVj) is for you.
If you need help, please come visit us at IRC/FreeNode on channel `#nflgame`.
If you've never used IRC before, then you can
[use a web client](http://webchat.freenode.net/?channels=%23nflgame).
(Enter any nickname you like, make sure the channel is `#nflgame`, fill in
the captcha and hit connect.)
Failing IRC, the second fastest way to get help is to
[open a new issue on the
tracker](https://github.com/BurntSushi/nflgame/issues/new).
There are several active contributors to nflgame that watch the issue tracker.
We tend to respond fairly quickly!
"""
from functools import reduce
from collections import OrderedDict
import itertools
import sys
import nflgame.game
import nflgame.live
import nflgame.player
import nflgame.sched
import nflgame.seq
from nflgame.version import __version__
assert OrderedDict # Asserting the import for static analysis.
VERSION = __version__ # Deprecated. Backwards compatibility.
NoPlayers = nflgame.seq.GenPlayerStats(None)
"""
NoPlayers corresponds to the identity element of a Players sequences.
Namely, adding it to any other Players sequence has no effect.
"""
players = nflgame.player._create_players()
"""
A dict of all players and meta information about each player keyed
by GSIS ID. (The identifiers used by NFL.com GameCenter.)
"""
teams = [
['ARI', 'Arizona', 'Cardinals', 'Arizona Cardinals'],
['ATL', 'Atlanta', 'Falcons', 'Atlanta Falcons'],
['BAL', 'Baltimore', 'Ravens', 'Baltimore Ravens'],
['BUF', 'Buffalo', 'Bills', 'Buffalo Bills'],
['CAR', 'Carolina', 'Panthers', 'Carolina Panthers'],
['CHI', 'Chicago', 'Bears', 'Chicago Bears'],
['CIN', 'Cincinnati', 'Bengals', 'Cincinnati Bengals'],
['CLE', 'Cleveland', 'Browns', 'Cleveland Browns'],
['DAL', 'Dallas', 'Cowboys', 'Dallas Cowboys'],
['DEN', 'Denver', 'Broncos', 'Denver Broncos'],
['DET', 'Detroit', 'Lions', 'Detroit Lions'],
['GB', 'Green Bay', 'Packers', 'Green Bay Packers', 'G.B.', 'GNB'],
['HOU', 'Houston', 'Texans', 'Houston Texans'],
['IND', 'Indianapolis', 'Colts', 'Indianapolis Colts'],
['JAX', 'JAC', 'Jacksonville', 'Jaguars', 'Jacksonville Jaguars'],
['KC', 'Kansas City', 'Chiefs', 'Kansas City Chiefs', 'K.C.', 'KAN'],
['MIA', 'Miami', 'Dolphins', 'Miami Dolphins'],
['MIN', 'Minnesota', 'Vikings', 'Minnesota Vikings'],
['NE', 'New England', 'Patriots', 'New England Patriots', 'N.E.', 'NWE'],
['NO', 'New Orleans', 'Saints', 'New Orleans Saints', 'N.O.', 'NOR'],
['NYG', 'Giants', 'New York Giants', 'N.Y.G.'],
['NYJ', 'Jets', 'New York Jets', 'N.Y.J.'],
['OAK', 'Oakland', 'Raiders', 'Oakland Raiders'],
['PHI', 'Philadelphia', 'Eagles', 'Philadelphia Eagles'],
['PIT', 'Pittsburgh', 'Steelers', 'Pittsburgh Steelers'],
['SD', 'San Diego', 'Chargers', 'San Diego Chargers', 'S.D.', 'SDG'],
['SEA', 'Seattle', 'Seahawks', 'Seattle Seahawks'],
['SF', 'San Francisco', '49ers', 'San Francisco 49ers', 'S.F.', 'SFO'],
['LA', 'STL', 'Los Angeles', 'St. Louis', 'Rams', 'Los Angeles Rams',
'St. Louis Rams', 'S.T.L.'],
['TB', 'Tampa Bay', 'Buccaneers', 'Tampa Bay Buccaneers', 'T.B.', 'TAM'],
['TEN', 'Tennessee', 'Titans', 'Tennessee Titans'],
['WAS', 'Washington', 'Redskins', 'Washington Redskins', 'WSH'],
]
"""
A list of all teams. Each item is a list of different ways to
describe a team. (i.e., JAC, JAX, Jacksonville, Jaguars, etc.).
The first item in each list is always the standard NFL.com
team abbreviation (two or three letters).
"""
def find(name, team=None):
"""
Finds a player (or players) with a name matching (case insensitive)
name and returns them as a list.
If team is not None, it is used as an additional search constraint.
"""
hits = []
for player in players.values():
if player.name.lower() == name.lower():
if team is None or team.lower() == player.team.lower():
hits.append(player)
return hits
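# Illustrative example (added comment; the player name below is hypothetical and only
# resolves if present in the bundled player data):
#
#   hits = find('Aaron Rodgers', team='GB')
#
# returns a list of matching player objects, or an empty list when nothing matches.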
def standard_team(team):
"""
Returns a standard abbreviation when team corresponds to a team in
nflgame.teams (case insensitive). All known variants of a team name are
searched. If no team is found, None is returned.
"""
team = team.lower()
for variants in teams:
for variant in variants:
if team == variant.lower():
return variants[0]
return None
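# Illustrative sketch (added; not part of the upstream nflgame package): every
# spelling listed in the `teams` table above maps back to the canonical
# abbreviation, so each call below returns 'GB'.
def _standard_team_examples():
    """Demonstration lookups against the `teams` table (all return 'GB')."""
    return [standard_team('Packers'), standard_team('Green Bay'), standard_team('gnb')]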
def games(year, week=None, home=None, away=None, kind='REG', started=False):
"""
games returns a list of all games matching the given criteria. Each
game can then be queried for player statistics and information about
the game itself (score, winner, scoring plays, etc.).
As a special case, if the home and away teams are set to the same team,
then all games where that team played are returned.
The kind parameter specifies whether to fetch preseason, regular season
or postseason games. Valid values are PRE, REG and POST.
The week parameter is relative to the value of the kind parameter, and
may be set to a list of week numbers.
In the regular season, the week parameter corresponds to the normal
week numbers 1 through 17. Similarly in the preseason, valid week numbers
are 1 through 4. In the post season, the week number corresponds to the
numerical round of the playoffs. So the wild card round is week 1,
the divisional round is week 2, the conference round is week 3
and the Super Bowl is week 4.
The year parameter specifies the season, and not necessarily the actual
year that a game was played in. For example, a Super Bowl taking place
in the year 2011 actually belongs to the 2010 season. Also, the year
parameter may be set to a list of seasons just like the week parameter.
Note that if a game's JSON data is not cached to disk, it is retrieved
from the NFL web site. A game's JSON data is *only* cached to disk once
the game is over, so be careful with the number of times you call this
while a game is going on. (i.e., don't piss off NFL.com.)
If started is True, then only games that have already started (or are
about to start in less than 5 minutes) will be returned. Note that the
started parameter requires pytz to be installed. This is useful when
you only want to collect stats from games that have JSON data available
(as opposed to waiting for a 404 error from NFL.com).
"""
return list(games_gen(year, week, home, away, kind, started))
def games_gen(year, week=None, home=None, away=None,
kind='REG', started=False):
"""
games returns a generator of all games matching the given criteria. Each
game can then be queried for player statistics and information about
the game itself (score, winner, scoring plays, etc.).
As a special case, if the home and away teams are set to the same team,
then all games where that team played are returned.
The kind parameter specifies whether to fetch preseason, regular season
or postseason games. Valid values are PRE, REG and POST.
The week parameter is relative to the value of the kind parameter, and
may be set to a list of week numbers.
In the regular season, the week parameter corresponds to the normal
week numbers 1 through 17. Similarly in the preseason, valid week numbers
are 1 through 4. In the post season, the week number corresponds to the
numerical round of the playoffs. So the wild card round is week 1,
the divisional round is week 2, the conference round is week 3
and the Super Bowl is week 4.
The year parameter specifies the season, and not necessarily the actual
year that a game was played in. For example, a Super Bowl taking place
in the year 2011 actually belongs to the 2010 season. Also, the year
parameter may be set to a list of seasons just like the week parameter.
Note that if a game's JSON data is not cached to disk, it is retrieved
from the NFL web site. A game's JSON data is *only* cached to disk once
the game is over, so be careful with the number of times you call this
while a game is going on. (i.e., don't piss off NFL.com.)
If started is True, then only games that have already started (or are
about to start in less than 5 minutes) will be returned. Note that the
started parameter requires pytz to be installed. This is useful when
you only want to collect stats from games that have JSON data available
(as opposed to waiting for a 404 error from NFL.com).
"""
infos = _search_schedule(year, week, home, away, kind, started)
if not infos:
yield None
else:
for info in infos:
g = nflgame.game.Game(info['eid'])
if g is None:
continue
yield g
def one(year, week, home, away, kind='REG', started=False):
"""
one returns a single game matching the given criteria. The
game can then be queried for player statistics and information about
the game itself (score, winner, scoring plays, etc.).
one returns either a single game or no games. If there are multiple games
matching the given criteria, an assertion is raised.
The kind parameter specifies whether to fetch preseason, regular season
or postseason games. Valid values are PRE, REG and POST.
The week parameter is relative to the value of the kind parameter, and
may be set to a list of week numbers.
In the regular season, the week parameter corresponds to the normal
week numbers 1 through 17. Similarly in the preseason, valid week numbers
are 1 through 4. In the post season, the week number corresponds to the
numerical round of the playoffs. So the wild card round is week 1,
the divisional round is week 2, the conference round is week 3
and the Super Bowl is week 4.
The year parameter specifies the season, and not necessarily the actual
year that a game was played in. For example, a Super Bowl taking place
in the year 2011 actually belongs to the 2010 season. Also, the year
parameter may be set to a list of seasons just like the week parameter.
Note that if a game's JSON data is not cached to disk, it is retrieved
from the NFL web site. A game's JSON data is *only* cached to disk once
the game is over, so be careful with the number of times you call this
while a game is going on. (i.e., don't piss off NFL.com.)
If started is True, then only games that have already started (or are
about to start in less than 5 minutes) will be returned. Note that the
started parameter requires pytz to be installed. This is useful when
you only want to collect stats from games that have JSON data available
(as opposed to waiting for a 404 error from NFL.com).
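A hypothetical sketch (the matchup is purely illustrative):

    # The single 2013 week-1 game hosted by Denver; returns None when
    # nothing matches and raises AssertionError when several games match.
    g = one(2013, 1, home='DEN', away=None)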
"""
infos = _search_schedule(year, week, home, away, kind, started)
if not infos:
return None
assert len(infos) == 1, 'More than one game matches the given criteria.'
return nflgame.game.Game(infos[0]['eid'])
def combine(games, plays=False):
"""
DEPRECATED. Please use one of nflgame.combine_{game,play,max}_stats
instead.
Combines a list of games into one big player sequence containing game
level statistics.
This can be used, for example, to get PlayerStat objects corresponding to
statistics across an entire week, some number of weeks or an entire season.
If the plays parameter is True, then statistics will be derived from
play by play data. This mechanism is slower but will contain more detailed
statistics like receiver targets, yards after the catch, punt and field
goal blocks, etc.
"""
if plays:
return combine_play_stats(games)
else:
return combine_game_stats(games)
def combine_game_stats(games):
"""
Combines a list of games into one big player sequence containing game
level statistics.
This can be used, for example, to get GamePlayerStats objects corresponding
to statistics across an entire week, some number of weeks or an entire
season.
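A hypothetical sketch of the usual pattern (the filter and attribute
names follow nflgame's standard statistical categories):

    week1 = games(2013, week=1)
    players = combine_game_stats(week1)
    for p in players.rushing().sort('rushing_yds').limit(5):
        print p, p.rushing_yds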
"""
return reduce(lambda ps1, ps2: ps1 + ps2,
[g.players for g in games if g is not None])
def combine_play_stats(games):
"""
Combines a list of games into one big player sequence containing play
level statistics.
This can be used, for example, to get PlayPlayerStats objects corresponding
to statistics across an entire week, some number of weeks or an entire
season.
This function should be used in lieu of combine_game_stats when more
detailed statistics such as receiver targets, yards after the catch and
punt/FG blocks are needed.
N.B. Since this combines *all* play data, this function may take a while
to complete depending on the number of games passed in.
"""
return reduce(lambda p1, p2: p1 + p2,
[g.drives.players() for g in games if g is not None])
def combine_max_stats(games):
"""
Combines a list of games into one big player sequence containing maximum
statistics based on game and play level statistics.
This can be used, for example, to get GamePlayerStats objects corresponding
to statistics across an entire week, some number of weeks or an entire
season.
This function should be used in lieu of combine_game_stats or
combine_play_stats when the best possible accuracy is desired.
"""
return reduce(lambda a, b: a + b,
[g.max_player_stats() for g in games if g is not None])
def combine_plays(games):
"""
Combines a list of games into one big play generator that can be searched
as if it were a single game.
"""
chain = itertools.chain(*[g.drives.plays() for g in games])
return nflgame.seq.GenPlays(chain)
def _search_schedule(year, week=None, home=None, away=None, kind='REG',
started=False):
"""
Searches the schedule to find the game identifiers matching the criteria
given.
The kind parameter specifies whether to fetch preseason, regular season
or postseason games. Valid values are PRE, REG and POST.
The week parameter is relative to the value of the kind parameter, and
may be set to a list of week numbers.
In the regular season, the week parameter corresponds to the normal
week numbers 1 through 17. Similarly in the preseason, valid week numbers
are 1 through 4. In the post season, the week number corresponds to the
numerical round of the playoffs. So the wild card round is week 1,
the divisional round is week 2, the conference round is week 3
and the Super Bowl is week 4.
The year parameter specifies the season, and not necessarily the actual
year that a game was played in. For example, a Super Bowl taking place
in the year 2011 actually belongs to the 2010 season. Also, the year
parameter may be set to a list of seasons just like the week parameter.
If started is True, then only games that have already started (or are
about to start in less than 5 minutes) will be returned. Note that the
started parameter requires pytz to be installed. This is useful when
you only want to collect stats from games that have JSON data available
(as opposed to waiting for a 404 error from NFL.com).
"""
infos = []
for info in nflgame.sched.games.values():
y, t, w = info['year'], info['season_type'], info['week']
h, a = info['home'], info['away']
if year is not None:
if isinstance(year, list) and y not in year:
continue
if not isinstance(year, list) and y != year:
continue
if week is not None:
if isinstance(week, list) and w not in week:
continue
if not isinstance(week, list) and w != week:
continue
if home is not None and away is not None and home == away:
if h != home and a != home:
continue
else:
if home is not None and h != home:
continue
if away is not None and a != away:
continue
if t != kind:
continue
if started:
gametime = nflgame.live._game_datetime(info)
now = nflgame.live._now()
if gametime > now and (gametime - now).total_seconds() > 300:
continue
infos.append(info)
return infos
|
onebitbrain/nflgame
|
nflgame/__init__.py
|
Python
|
unlicense
| 19,236
|
[
"VisIt"
] |
1a182a4f5817b0d090568a3f462d97ab4f886f3c5573b7e714cc7c110d12e2fc
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Execute computations asynchronously using threads or processes."""
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
from base import (FIRST_COMPLETED,
FIRST_EXCEPTION,
ALL_COMPLETED,
CancelledError,
TimeoutError,
Future,
Executor,
wait,
as_completed)
from process import ProcessPoolExecutor
from thread import ThreadPoolExecutor
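# A minimal usage sketch of the re-exported API (it mirrors the standard
# concurrent.futures interface provided by this backport):
#
#     with ThreadPoolExecutor(max_workers=2) as pool:
#         future = pool.submit(pow, 2, 10)
#         print future.result()   # 1024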
|
mj3-16/mjtest
|
mjtest/util/concurrent/futures/__init__.py
|
Python
|
mit
| 742
|
[
"Brian"
] |
796daa7aa66995ea023031ed3e1266377b333d8c9e19fc501e2303f40082967a
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Extension for flake8 to test for certain __future__ imports"""
from __future__ import print_function
import optparse
import sys
from collections import namedtuple
try:
import argparse
except ImportError as e:
argparse = e
from ast import NodeVisitor, Str, Module, parse
__version__ = '0.4.6'
class FutureImportVisitor(NodeVisitor):
def __init__(self):
super(FutureImportVisitor, self).__init__()
self.future_imports = []
self._uses_code = False
def visit_ImportFrom(self, node):
if node.module == '__future__':
self.future_imports += [node]
def visit_Expr(self, node):
if not isinstance(node.value, Str) or node.value.col_offset != 0:
self._uses_code = True
def generic_visit(self, node):
if not isinstance(node, Module):
self._uses_code = True
super(FutureImportVisitor, self).generic_visit(node)
@property
def uses_code(self):
return self._uses_code or self.future_imports
class Flake8Argparse(object):
@classmethod
def add_options(cls, parser):
class Wrapper(object):
def add_argument(self, *args, **kwargs):
kwargs.setdefault('parse_from_config', True)
try:
parser.add_option(*args, **kwargs)
except (optparse.OptionError, TypeError):
use_config = kwargs.pop('parse_from_config')
option = parser.add_option(*args, **kwargs)
if use_config:
# flake8 2.X uses config_options to handle stuff like 'store_true'
parser.config_options.append(option.get_opt_string().lstrip('-'))
cls.add_arguments(Wrapper())
@classmethod
def add_arguments(cls, parser):
pass
Feature = namedtuple('Feature', 'index, name, optional, mandatory')
DIVISION = Feature(0, 'division', (2, 2, 0), (3, 0, 0))
ABSOLUTE_IMPORT = Feature(1, 'absolute_import', (2, 5, 0), (3, 0, 0))
WITH_STATEMENT = Feature(2, 'with_statement', (2, 5, 0), (2, 6, 0))
PRINT_FUNCTION = Feature(3, 'print_function', (2, 6, 0), (3, 0, 0))
UNICODE_LITERALS = Feature(4, 'unicode_literals', (2, 6, 0), (3, 0, 0))
GENERATOR_STOP = Feature(5, 'generator_stop', (3, 5, 0), (3, 7, 0))
NESTED_SCOPES = Feature(6, 'nested_scopes', (2, 1, 0), (2, 2, 0))
GENERATORS = Feature(7, 'generators', (2, 2, 0), (2, 3, 0))
ANNOTATIONS = Feature(8, 'annotations', (3, 7, 0), (4, 0, 0))
# Order important as it defines the error code
ALL_FEATURES = (DIVISION, ABSOLUTE_IMPORT, WITH_STATEMENT, PRINT_FUNCTION,
UNICODE_LITERALS, GENERATOR_STOP, NESTED_SCOPES, GENERATORS, ANNOTATIONS)
FEATURES = dict((feature.name, feature) for feature in ALL_FEATURES)
FEATURE_NAMES = frozenset(feature.name for feature in ALL_FEATURES)
# Make sure the features aren't messed up
assert len(FEATURES) == len(ALL_FEATURES)
assert all(feature.index == index for index, feature in enumerate(ALL_FEATURES))
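# The checker below derives its error codes from the feature table above:
# a missing import of the feature with index N is reported as FI(10 + N), a
# present import as FI(50 + N), and an import of a non-existent feature as
# FI90. For example (illustrative), a module missing `print_function' is
# flagged with FI13, while one that already imports it gets FI53.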
class FutureImportChecker(Flake8Argparse):
version = __version__
name = 'flake8-future-import'
require_code = True
min_version = False
def __init__(self, tree, filename):
self.tree = tree
@classmethod
def add_arguments(cls, parser):
parser.add_argument('--require-code', action='store_true',
help='Only apply to files that contain more than '
'comments and (doc)strings')
parser.add_argument('--min-version', default=False,
help='The minimum version supported so that it can '
'ignore mandatory and non-existent features')
@classmethod
def parse_options(cls, options):
cls.require_code = options.require_code
min_version = options.min_version
if min_version is not False:
try:
min_version = tuple(int(num)
for num in min_version.split('.'))
except ValueError:
min_version = None
if min_version is None or len(min_version) > 3:
raise ValueError('Minimum version "{0}" not formatted '
'like "A.B.C"'.format(options.min_version))
min_version += (0, ) * (max(3 - len(min_version), 0))
cls.min_version = min_version
def _generate_error(self, future_import, present):
feature = FEATURES.get(future_import)
if feature is None:
code = 90
msg = 'does not exist'
else:
if (not present and self.min_version and
(feature.mandatory <= self.min_version or
feature.optional > self.min_version)):
return None
code = 10 + feature.index
if present:
msg = 'present'
code += 40
else:
msg = 'missing'
msg = 'FI{0} __future__ import "{1}" ' + msg
return msg.format(code, future_import)
def run(self):
visitor = FutureImportVisitor()
visitor.visit(self.tree)
if self.require_code and not visitor.uses_code:
return
present = set()
for import_node in visitor.future_imports:
for alias in import_node.names:
err = self._generate_error(alias.name, True)
if err:
yield import_node.lineno, 0, err, type(self)
present.add(alias.name)
for name in FEATURES:
if name not in present:
err = self._generate_error(name, False)
if err:
yield 1, 0, err, type(self)
def main(args):
if isinstance(argparse, ImportError):
print('argparse is required for the standalone version.')
return
parser = argparse.ArgumentParser()
choices = set(10 + feature.index for feature in FEATURES.values())
choices |= set(40 + choice for choice in choices) | set([90])
choices = set('FI{0}'.format(choice) for choice in choices)
parser.add_argument('--ignore', help='Ignore the given comma-separated '
'codes')
FutureImportChecker.add_arguments(parser)
parser.add_argument('files', nargs='+')
args = parser.parse_args(args)
FutureImportChecker.parse_options(args)
if args.ignore:
ignored = set(args.ignore.split(','))
unrecognized = ignored - choices
ignored &= choices
if unrecognized:
invalid = set()
for invalid_code in unrecognized:
no_valid = True
if not invalid:
for valid_code in choices:
if valid_code.startswith(invalid_code):
ignored.add(valid_code)
no_valid = False
if no_valid:
invalid.add(invalid_code)
if invalid:
raise ValueError('The code(s) is/are invalid: "{0}"'.format(
'", "'.join(invalid)))
else:
ignored = set()
has_errors = False
for filename in args.files:
with open(filename, 'rb') as f:
tree = parse(f.read(), filename=filename, mode='exec')
for line, char, msg, checker in FutureImportChecker(tree,
filename).run():
if msg[:4] not in ignored:
has_errors = True
print('{0}:{1}:{2}: {3}'.format(filename, line, char + 1, msg))
return has_errors
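# A hypothetical standalone invocation (flags as defined by the argparse
# setup above; the file name is a placeholder):
#
#     python flake8_future_import.py --ignore FI16,FI17,FI18 some_module.py
#
# A prefix such as `FI1' in --ignore expands to every valid code that starts
# with it (FI10 through FI18).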
if __name__ == '__main__':
sys.exit(1 if main(sys.argv[1:]) else 0)
|
xZise/flake8-future-import
|
flake8_future_import.py
|
Python
|
mit
| 7,786
|
[
"VisIt"
] |
240962399b2c3b1531a1bb75bdc8554f2c0000a56e0461f482edaf969f19ed3c
|
##############################################################################
# Copyright (c) 2000-2016 Ericsson Telecom AB
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
#
# Balasko, Jeno
# Kovacs, Ferenc
#
##############################################################################
import os, re, types, time
import utils
class titan_publisher:
def __init__(self, logger, config):
self._logger = logger
self._config = config
self._plotter = plotter(self._logger, self._config)
self._platform = None
self._titan = None
self._regtest = None
self._perftest = None
self._eclipse = None
self._functest = None
self._vobtest = None
def __str__(self):
return self.as_text()
def titan_out(self, config, slave_name, titan_out):
""" Write TITAN results to file. """
if not self._titan:
self._titan = titan_out
if not self._titan:
return
log_dir = os.path.join(config.get('logdir', ''), slave_name)
(stamp_begin, stamp_end, \
((ret_val_dep, stdout_dep, stderr_dep), \
(ret_val_make, stdout_make, stderr_make), \
(ret_val_install, stdout_install, stderr_install))) = self._titan
file_dep = open('%s/titan.dep' % log_dir, 'wt')
file_make = open('%s/titan.make' % log_dir, 'wt')
file_install = open('%s/titan.install' % log_dir, 'wt')
file_dep.write(''.join(stdout_dep))
file_make.write(''.join(stdout_make))
file_install.write(''.join(stdout_install))
file_dep.close()
file_make.close()
file_install.close()
else:
self._logger.error('Only one TITAN build is allowed in the ' \
'build cycle, ignoring the results')
def regtest_out(self, config, slave_name, regtest_out):
""" Write regression test results to file. """
if not self._regtest:
self._regtest = regtest_out
if not self._regtest:
return
log_dir = os.path.join(config.get('logdir', ''), slave_name)
for rt, rt_data in self._regtest.iteritems():
(stamp_begin, stamp_end, ((ret_val_make, stdout_make, stderr_make), \
(ret_val_run, stdout_run, stderr_run))) = rt_data
file_make = open('%s/regtest-make.%s' % (log_dir, rt), 'wt')
file_run = open('%s/regtest-run.%s' % (log_dir, rt), 'wt')
file_make.write(''.join(stdout_make))
file_run.write(''.join(stdout_run))
file_make.close()
file_run.close()
else:
self._logger.error('The regression test results are already set')
def perftest_out(self, config, slave_name, perftest_out):
""" Write performance test results to file. """
if not self._perftest:
self._perftest = perftest_out
if not self._perftest:
return
log_dir = os.path.join(config.get('logdir', ''), slave_name)
for rt, rt_data in self._perftest.iteritems():
(stamp_begin, stamp_end, results) = rt_data
(ret_val_make, stdout_make, stderr_make) = results.get('make', ([], [], []))
file_make = open('%s/perftest.%s' % (log_dir, rt), 'wt')
file_make.write(''.join(stdout_make))
file_make.close()
for run in results.get('run', []):
(cps, (ret_val_run, stdout_run, stderr_run)) = run
file_run = open('%s/perftest.%s-%d' % (log_dir, rt, cps), 'wt')
file_run.write(''.join(stdout_run))
file_run.close()
else:
self._logger.error('The performance test results are already set')
def eclipse_out(self, config, slave_name, eclipse_out):
if not self._eclipse:
self._eclipse = eclipse_out
else:
self._logger.error('The Eclipse build results are already set')
def functest_out(self, config, slave_name, functest_out):
""" Store function test results for publishing. """
if not self._functest:
self._functest = functest_out
else:
self._logger.error('The function test results are already set')
def vobtest_out(self, config, slave_name, vobtest_out):
""" Store VOB test results for publishing. """
if not self._vobtest:
self._vobtest = vobtest_out
else:
self._logger.error('The VOB product results are already set')
def dump_csv(self, stamp_old, stamp_new, config, config_name, slave_name):
out_file = os.path.join(self._config.configs[config_name]['logdir'], \
os.path.join(slave_name, 'report.csv'))
try:
out_csv = open(out_file, 'wt')
out_csv.write(self.as_csv(stamp_old, stamp_new, config, config_name, slave_name))
out_csv.close()
except IOError, (errno, strerror):
self._logger.error('Cannot open file `%s\': %d: %s' \
% (out_file, errno, strerror))
def dump_txt(self, stamp_old, stamp_new, config, config_name, slave_name):
out_file = os.path.join(self._config.configs[config_name]['logdir'], \
os.path.join(slave_name, 'report.txt'))
try:
out_txt = open(out_file, 'wt')
out_txt.write(self.as_txt(stamp_old, stamp_new, config, config_name, slave_name))
out_txt.close()
except IOError, (errno, strerror):
self._logger.error('Cannot open file `%s\': %d: %s' \
% (out_file, errno, strerror))
def dump_html(self, stamp_old, stamp_new, config, config_name, slave_name):
out_file = os.path.join(self._config.configs[config_name]['logdir'], \
os.path.join(slave_name, 'report.html'))
try:
out_html = open(out_file, 'wt')
out_html.write(self.as_html(stamp_old, stamp_new, config, config_name, slave_name))
out_html.close()
except IOError, (errno, strerror):
self._logger.error('Cannot open file `%s\': %d: %s' \
% (out_file, errno, strerror))
def as_csv(self, stamp_begin, stamp_end, config, config_name, slave_name):
""" Return a very brief summary of the build. The used runtimes are not
distinguished. Neither the compile time errors and runtime errors.
Take care of the (header-)order when adding new columns.
Arguments:
stamp_begin: Start of the whole build.
stamp_end: End of the whole build.
config: The actual build configuration.
config_name: The name of the actual build configuration.
slave_name: The name of the actual slave. It's defined in the
configuration file.
Returns:
The slave specific results in a brief CSV format suitable for
notification e-mails. The master can easily generate a fancy table
from this CSV data.
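A hypothetical line produced by this method, with placeholder
timestamps (the six trailing columns are the TITAN build, regression,
performance, function, VOB and Eclipse results; 0 = success,
1 = failure, -1 = disabled):
    <start>,<end>,Linux 3.10 x86_64,gcc version 4.8.5,cfg_gcc,slave1,0,0,-1,0,-1,0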
"""
# `gcc' writes to the standard error.
results = []
uname_out = utils.run_cmd('uname -srmp')[1]
gcc_out = filter(lambda v: v.find(' ver') > 0, utils.run_cmd('%s -v' % (('cc' in config and len(config['cc']) > 0) and config['cc'] or 'gcc'))[2])
results.append('%s,%s,%s,%s,%s,%s' \
% (stamp_begin, stamp_end, \
uname_out[0].strip(), gcc_out[0].strip(), \
config_name, slave_name))
if self._titan:
(stamp_begin, stamp_end, \
((ret_val_dep, stdout_dep, stderr_dep), \
(ret_val_make, stdout_make, stderr_make), \
(ret_val_install, stdout_install, stderr_install))) = self._titan
if ret_val_dep or ret_val_make or ret_val_install:
results.append(',1,1,1,1,1,1')
return ''.join(results)
results.append(',0')
else:
self._logger.error('The output of TITAN build was not set')
results.append(',-1,-1,-1,-1,-1,-1')
return ''.join(results)
if self._regtest:
all_fine = True
for rt, rt_data in self._regtest.iteritems():
(stamp_begin, stamp_end, ((ret_val_make, stdout_make, stderr_make), \
(ret_val_run, stdout_run, stderr_run))) = rt_data
if ret_val_make or ret_val_run:
all_fine = False
break
results.append(all_fine and ',0' or ',1')
else:
results.append(',-1')
if self._perftest:
all_fine = True
for rt, rt_data in self._perftest.iteritems():
(stamp_begin, stamp_end, compile_run_data) = rt_data
(ret_val_make, stdout_make, stderr_make) = compile_run_data['make']
if ret_val_make:
all_fine = False
break
for run_data in compile_run_data['run']:
(cps, (ret_val_run, stdout_run, stderr_run)) = run_data
if ret_val_run:
all_fine = False
break
results.append(all_fine and ',0' or ',1')
else:
results.append(',-1')
if self._functest:
all_fine = True
for rt, rt_data in self._functest.iteritems():
(stamp_begin, stamp_end, functest_data) = rt_data
for test, test_results in functest_data.iteritems():
(log_file_name, error_file_name) = test_results
satester_report = test == 'Config_Parser' or test == 'Semantic_Analyser'
if satester_report:
log_file = open(log_file_name, 'rt')
log_file_data = log_file.readlines()
log_file.close()
log_file_data.reverse()
total_matched = passed = None
for line in log_file_data:
if not total_matched:
total_matched = re.match('^Total number of.*: (\d+)$', line)
if not passed:
passed = re.match('\s*PASSED.*cases: (\d+)', line)
if total_matched and passed:
if int(total_matched.group(1)) != int(passed.group(1)):
all_fine = False
break
if not total_matched or not passed:
self._logger.error('There\'s something wrong with the ' \
'function test logs, it\'s treated as an ' \
'error')
all_fine = False
break
else:
if error_file_name and os.path.isfile(error_file_name):
error_file = open(error_file_name, 'rt')
error_file_data = error_file.readlines()
error_file.close()
if len(error_file_data) != 0:
all_fine = False
break
results.append(all_fine and ',0' or ',1')
else:
results.append(',-1')
if self._vobtest:
# Unfortunately there's no `goto' in Python. However, returning from
# multiple loops can be done using exceptions...
all_fine = True
for rt, rt_data in self._vobtest.iteritems():
(stamp_begin, stamp_end, vobtest_data) = rt_data
for kind, products in vobtest_data.iteritems():
if not len(products) > 0:
continue
for product in products:
for name, name_data in product.iteritems():
if not isinstance(name_data, types.DictType):
all_fine = False
break
else:
for action, action_data in name_data.iteritems():
if isinstance(action_data, types.TupleType):
(ret_val, output_files, stdout, stderr) = action_data
if ret_val:
all_fine = False
break
results.append(all_fine and ',0' or ',1')
else:
results.append(',-1')
if self._eclipse:
(stamp_begin, stamp_end, log_file, (ret_val_ant, stdout_ant, stderr_ant)) = self._eclipse
results.append(ret_val_ant and ',1' or ',0')
else:
results.append(',-1')
return ''.join(results)
def as_txt_regtest(self):
result = []
for rt, rt_data in self._regtest.iteritems():
(stamp_begin, stamp_end, ((ret_val_make, stdout_make, stderr_make), \
(ret_val_run, stdout_run, stderr_run))) = rt_data
result.append('%s [%s - %s] Regression test results for the `%s\' ' \
'runtime\n\n' % (utils.get_time_diff(False, stamp_begin, stamp_end), \
stamp_begin, stamp_end, rt == 'rt2' and 'function-test' or 'load-test'))
if ret_val_make:
result.append('Regression test failed to build:\n\n%s\n' \
% ''.join(stdout_make[-20:]))
elif ret_val_run:
result.append('Regression test failed to run:\n\n%s\n' \
% ''.join(stdout_run[-20:]))
else:
result.append('Regression test built successfully.\n\n%s\n' \
% ''.join(stdout_run[-20:]))
return ''.join(result)
def as_txt_perftest(self):
result = []
for rt, rt_data in self._perftest.iteritems():
(stamp_begin, stamp_end, perftest_results) = rt_data
result.append('%s [%s - %s] Performance test results for the `%s\' ' \
'runtime\n\n' % (utils.get_time_diff(False, stamp_begin, stamp_end), \
stamp_begin, stamp_end, rt == 'rt2' and 'function-test' or 'load-test'))
(ret_val_dep, stdout_dep, stderr_dep) = perftest_results['dep']
(ret_val_make, stdout_make, stderr_make) = perftest_results['make']
run_data = perftest_results['run']
if ret_val_dep or ret_val_make:
result.append('Performance test failed to build:\n\n%s\n' \
% ''.join(ret_val_dep and stdout_dep[-20:] or stdout_make[-20:]))
else:
result.append('Performance test compiled successfully.\n\n')
for run in run_data:
(cps, (ret_val_run, stdout_run, stderr_run)) = run
result.append('For `%d\' CPS: ' % cps)
if ret_val_run:
result.append('Failed\n%s\n\n' % ''.join(stdout_run[-20:]))
else:
result.append('Succeeded\nExpected Calls/Measured Calls/' \
'Expected CPS/Measured CPS: %s\n' \
% ' '.join(''.join(filter(lambda run_info: \
'Entities/Time' in run_info, stdout_run)).split()[-5:-1]))
return ''.join(result)
def as_txt_eclipse(self):
result = []
(stamp_begin, stamp_end, log_file, (ret_val_ant, stdout_ant, stderr_ant)) = self._eclipse
result.append('%s [%s - %s] Eclipse build results\n\n'
% (utils.get_time_diff(False, stamp_begin, stamp_end), stamp_begin, stamp_end))
f = open(log_file, 'rt')
log_file_data = f.readlines()
f.close()
if ret_val_ant:
result.append('Eclipse plug-ins failed to build:\n%s\n\n' \
% ''.join(log_file_data[-20:]))
else:
result.append('Eclipse plug-ins built successfully.\n\n%s\n' \
% ''.join(log_file_data[-20:]))
return ''.join(result)
def as_txt_functest(self):
result = []
for rt, rt_data in self._functest.iteritems():
(stamp_begin, stamp_end, functest_results) = rt_data
result.append('%s [%s - %s] Function test results for the `%s\' runtime\n\n' \
% (utils.get_time_diff(False, stamp_begin, stamp_end), \
stamp_begin, stamp_end, (rt == 'rt2' and 'function-test' or 'load-test')))
for function_test, test_results in functest_results.iteritems():
(log_file_name, error_file_name) = test_results
satester_report = function_test == 'Config_Parser' or function_test == 'Semantic_Analyser'
if satester_report:
log_file = open(log_file_name, 'rt')
log_file_data = log_file.readlines()
log_file.close()
total_matched = passed = None
for line in log_file_data:
if not total_matched:
total_matched = re.match('^Total number of.*: (\d+)$', line)
if not passed:
passed = re.match('\s*PASSED.*cases: (\d+)', line)
if passed and total_matched:
if int(passed.group(1)) == int(total_matched.group(1)):
result.append('All `%s\' function tests succeeded.\n' \
% function_test)
else:
result.append('\n`%s\' function tests failed:\n\n%s\n' \
% (function_test, \
''.join(log_file_data[-20:])))
break
else:
if error_file_name and os.path.isfile(error_file_name):
error_file = open(error_file_name, 'rt')
error_file_data = error_file.readlines()
error_file.close()
if len(error_file_data) == 0:
result.append('All `%s\' function tests succeeded.\n' \
% function_test)
else:
result.append('\n`%s\' function tests failed:\n\n%s\n' \
% (function_test, \
''.join(error_file_data[-20:])))
else:
result.append('All `%s\' function tests succeeded.\n' \
% function_test)
result.append('\n')
return ''.join(result)
def as_txt_vobtest(self):
result = []
header = ('Product/Action', '`compiler -s\'', '`compiler\'', '`make\'', '`make run\'\n')
for rt, rt_data in self._vobtest.iteritems():
(stamp_begin, stamp_end, vobtest_results) = rt_data
result.append('%s [%s - %s] VOB product results for the %s runtime\n\n' \
% (utils.get_time_diff(False, stamp_begin, stamp_end), \
stamp_begin, stamp_end, (rt == 'rt2' and 'function-test' or 'load-test')))
for kind, products in vobtest_results.iteritems():
if not len(products) > 0:
continue
title = 'Results for %d `%s\' products using the %s runtime:' \
% (len(products), kind, (rt == 'rt2' and 'function-test' \
or 'load-test'))
result.append('%s\n%s\n' % (title, '-' * len(title)))
body = []
for product in products:
for name, name_data in product.iteritems():
row = [name]
if not isinstance(name_data, types.DictType):
row.extend(['Unavailable'] * (len(header) - 1))
body.append(row)
else:
action_order = {'semantic':1, 'translate':2, 'compile':3, 'run':4}
row.extend([''] * len(action_order.keys()))
for action, action_data in name_data.iteritems():
if not action in action_order.keys():
self._logger.error('Unknown action `%s\' while preparing ' \
'the text output' % action)
continue
action_index = action_order[action]
if not isinstance(action_data, types.TupleType):
row[action_index] = 'Disabled'
else:
(ret_val, output_files, stdout, stderr) = action_data
row[action_index] = '%s' % (ret_val != 0 and '*Failure*' or 'Success')
body.append(row)
result.append(self.as_txt_table(header, body) + '\n')
return ''.join(result)
def as_txt(self, stamp_begin, stamp_end, config, config_name, slave_name):
""" Return the string representation of the test results.
"""
results = []
uname_out = utils.run_cmd('uname -srmp')[1]
gcc_out = filter(lambda v: v.find(' ver') > 0, utils.run_cmd('%s -v' % (('cc' in config and len(config['cc']) > 0) and config['cc'] or 'gcc'))[2])
results.append('Platform: %s\nGCC/LLVM version: %s\n\n' \
% (uname_out[0].strip(), gcc_out[0].strip()))
if self._titan:
(stamp_begin, stamp_end, \
((ret_val_dep, stdout_dep, stderr_dep), \
(ret_val_make, stdout_make, stderr_make), \
(ret_val_install, stdout_install, stderr_install))) = self._titan
results.append('%s [%s - %s] TITAN build\n\n' \
% (utils.get_time_diff(False, stamp_begin, stamp_end), \
stamp_begin, stamp_end))
if ret_val_dep or ret_val_make or ret_val_install:
# The `stderr' is always redirected to `stdout'.
results.append('TITAN build failed, check the logs for further ' \
'investigation...\n\n%s\n' \
% ''.join(stdout_install[-20:]))
else:
results.append('TITAN build succeeded.\n\n%s\n' \
% utils.get_license_info('%s/bin/compiler' \
% self._config.configs[config_name]['installdir']))
if self._regtest:
results.append(self.as_txt_regtest())
if self._perftest:
results.append(self.as_txt_perftest())
if self._eclipse:
results.append(self.as_txt_eclipse())
if self._functest:
results.append(self.as_txt_functest())
if self._vobtest:
results.append(self.as_txt_vobtest())
return ''.join(results)
def as_txt_table(self, header = None, body = []):
""" Create a table like ASCII composition using the given header and the
rows of the table. The header is an optional string list. If the
header is present and there are more columns in the body the smaller
wins.
Arguments:
header: The columns of the table.
body: Cell contents.
Returns:
The table as a string.
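A small illustrative call and its output:
    as_txt_table(('Name', 'Status'),
                 [['titan', 'Success'], ['regtest', '*Failure*']])
    Name     Status
    titan    Success
    regtest  *Failure*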
"""
if len(body) == 0 or len(body) != len([row for row in body \
if isinstance(row, types.ListType)]):
self._logger.error('The second argument of `as_txt_table()\' must be ' \
'a list of lists')
return ''
num_cols = len(body[0])
max_widths = []
if header and len(header) < num_cols:
num_cols = len(header)
for col in range(num_cols):
max_width = -1
for row in range(len(body)):
if max_width < len(body[row][col]):
max_width = len(body[row][col])
if header and max_width < len(header[col]):
max_width = len(header[col])
max_widths.append(max_width + 2) # Ad-hoc add.
ret_val = '' # Start filling the table.
if header:
ret_val += ''.join([cell.ljust(max_widths[i]) \
for i, cell in enumerate(header[:num_cols])]) + '\n'
for row in range(len(body)):
ret_val += ''.join([cell.ljust(max_widths[i]) \
for i, cell in enumerate(body[row][:num_cols])]) + '\n'
return ret_val
def as_html_titan(self, config_name, slave_name):
""" Return the HTML representation of the TITAN build results as a string.
"""
result = []
(stamp_begin, stamp_end, \
((ret_val_dep, stdout_dep, stderr_dep), \
(ret_val_make, stdout_make, stderr_make), \
(ret_val_install, stdout_install, stderr_install))) = self._titan
result.append('<span class="%s">TITAN build</span><br/><br/>\n' \
% ((ret_val_dep or ret_val_make or ret_val_install) \
and 'error_header' or 'header'))
result.append('( `<a href="titan.dep">make dep</a>\' )<br/><br/>\n')
result.append('( `<a href="titan.make">make</a>\' )<br/><br/>\n')
result.append('( `<a href="titan.install">make install</a>\' )' \
'<br/><br/>\n')
result.append('<span class="stamp">%s - %s [%s]</span>\n' \
% (stamp_begin, stamp_end, \
utils.get_time_diff(False, stamp_begin, stamp_end)))
result.append('<pre>\n')
if ret_val_dep or ret_val_make or ret_val_install:
result.append('The TITAN build failed, check the logs for further ' \
'investigation...\n\n%s\n' % self.strip_tags(''.join(stdout_install[-20:])))
else:
result.append('TITAN build succeeded.\n\n%s\n' \
% self.strip_tags(utils.get_license_info('%s/bin/compiler' \
% self._config.configs[config_name]['installdir'])))
result.append('</pre>\n')
return ''.join(result)
def as_html_regtest(self, config_name, slave_name):
""" Return the HTML representation of the regression test results as a
string. The last part of the output is always included.
"""
result = []
for rt, rt_data in self._regtest.iteritems():
(stamp_begin, stamp_end, ((ret_val_make, stdout_make, stderr_make), \
(ret_val_run, stdout_run, stderr_run))) = rt_data
result.append('<span class="%s">Regression test results for the `%s\' ' \
'runtime</span><br/><br/>\n' \
% (((ret_val_make or ret_val_run) and 'error_header' or 'header'), \
(rt == 'rt2' and 'function-test' or 'load-test')))
result.append('( `<a href="regtest-make.%s">make</a>\' )<br/><br/>\n' % rt)
result.append('( `<a href="regtest-run.%s">make run</a>\' )<br/><br/>\n' % rt)
result.append('<span class="stamp">%s - %s [%s]</span>\n<pre>\n' \
% (stamp_begin, stamp_end, \
utils.get_time_diff(False, stamp_begin, stamp_end)))
if ret_val_make:
result.append('Regression test failed to build:\n\n%s\n</pre>\n' \
% self.strip_tags(''.join(stdout_make[-20:])))
elif ret_val_run:
result.append('Regression test failed to run:\n\n%s\n</pre>\n' \
% self.strip_tags(''.join(stdout_run[-20:])))
else:
result.append('Regression test built successfully.\n\n%s\n</pre>\n' \
% self.strip_tags(''.join(stdout_run[-20:])))
return ''.join(result)
def as_html_perftest(self, config_name, slave_name):
""" Return the HTML representation of the performance test results as a
string. Some logic is included.
"""
result = []
for rt, rt_data in self._perftest.iteritems():
(stamp_begin, stamp_end, perftest_results) = rt_data
(ret_val_dep, stdout_dep, stderr_dep) = perftest_results['dep']
(ret_val_make, stdout_make, stderr_make) = perftest_results['make']
run_data = perftest_results['run']
run_failed = False
for run in run_data:
(cps, (ret_val_run, stdout_run, stderr_run)) = run
if ret_val_run:
run_failed = True
break
result.append(
'<span class="%s">Performance test results for the `%s\' ' \
'runtime</span><br/><br/>\n' \
% (((ret_val_dep or ret_val_make or run_failed) \
and 'error_header' or 'header'), \
(rt == 'rt2' and 'function-test' or 'load-test')))
result.append('( `<a href="perftest.%s">make</a>\' )<br/><br/>\n' % rt)
result.append('( `<a href=".">make run</a>\' )<br/><br/>')
result.append('<span class="stamp">%s - %s [%s]</span>\n' \
% (stamp_begin, stamp_end, \
utils.get_time_diff(False, stamp_begin, stamp_end)))
result.append('<pre>\n')
if ret_val_dep or ret_val_make:
result.append('Performance test failed to build:\n\n%s\n' \
% self.strip_tags(''.join(ret_val_dep and stdout_dep[-20:] or stdout_make[-20:])))
else:
result.append('Performance test compiled successfully.\n\n')
result.append('<embed src="perftest-stats-%s.svg" width="640" height="480" type="image/svg+xml"/>\n\n' % rt)
for run in run_data:
(cps, (ret_val_run, stdout_run, stderr_run)) = run
if ret_val_run:
result.append('Failed for `%d\' CPS.\n\n%s\n\n' \
% (cps, self.strip_tags(''.join(stdout_run[-20:]))))
else:
result.append('Expected Calls/Measured Calls/' \
'Expected CPS/Measured CPS: %s\n' \
% ' '.join(''.join(filter(lambda run_info: \
'Entities/Time' in run_info, stdout_run)).split()[-5:-1]))
result.append('\n</pre>\n')
return ''.join(result)
def as_html_eclipse(self, config_name, slave_name):
result = []
(stamp_begin, stamp_end, log_file, (ret_val_ant, stdout_ant, stderr_ant)) = self._eclipse
result.append('<span class="%s">Eclipse plug-in build results</span><br/><br/>\n' \
% ((ret_val_ant and 'error_header' or 'header')))
result.append('( `<a href="eclipse-mylog.log">ant</a>\' )<br/><br/>\n')
result.append('<span class="stamp">%s - %s [%s]</span>\n<pre>\n' \
% (stamp_begin, stamp_end, \
utils.get_time_diff(False, stamp_begin, stamp_end)))
f = open(log_file, 'rt')
log_file_data = f.readlines()
f.close()
if ret_val_ant:
result.append('Eclipse plug-ins failed to build:\n\n%s\n</pre>\n' \
% self.strip_tags(''.join(log_file_data[-20:])))
else:
result.append('Eclipse plug-ins built successfully.\n\n%s\n</pre>\n' \
% self.strip_tags(''.join(log_file_data[-20:])))
return ''.join(result)
def as_html_functest(self, config_name, slave_name):
""" Return the HTML representation of the function test results as a
string. Some logic is included.
"""
result = []
for rt, rt_data in self._functest.iteritems():
(stamp_begin, stamp_end, functest_results) = rt_data
any_failure = False
result_tmp = []
for function_test, test_results in functest_results.iteritems():
(log_file_name, error_file_name) = test_results
satester_report = function_test == 'Config_Parser' or function_test == 'Semantic_Analyser'
if satester_report:
log_file = open(log_file_name, 'rt')
log_file_data = log_file.readlines()
log_file.close()
total_matched = passed = None
for line in log_file_data:
if not total_matched:
total_matched = re.match('^Total number of.*: (\d+)$', line)
if not passed:
passed = re.match('\s*PASSED.*cases: (\d+)', line)
if passed and total_matched:
if int(passed.group(1)) == int(total_matched.group(1)):
result_tmp.append('All `%s\' function tests succeeded.\n' \
% function_test)
else:
result_tmp.append('\n`%s\' function tests failed:\n\n%s\n' \
% (function_test, \
self.strip_tags(''.join(log_file_data[-20:]))))
any_failure = True
break
else:
if error_file_name and os.path.isfile(error_file_name):
error_file = open(error_file_name, 'rt')
error_file_data = error_file.readlines()
error_file.close()
if len(error_file_data) == 0:
result_tmp.append('All `%s\' function tests succeeded.\n' \
% function_test)
else:
result_tmp.append('\n`%s\' function tests failed:\n\n%s\n' \
% (function_test, \
self.strip_tags(''.join(error_file_data[-20:]))))
any_failure = True
else:
result_tmp.append('All `%s\' function tests succeeded.\n' \
% function_test)
result.append('<span class="%s">Function test results for the ' \
'`%s\' runtime</span><br/><br/>\n' \
% ((any_failure and 'error_header' or 'header'), \
(rt == 'rt2' and 'function-test' or 'load-test')))
result.append('( `<a href=".">make all</a>\')<br/><br/>\n')
result.append('<span class="stamp">%s - %s [%s]</span>\n' \
% (stamp_begin, stamp_end, \
utils.get_time_diff(False, stamp_begin, stamp_end)))
result.append('<pre>\n')
result.extend(result_tmp)
result.append('\n</pre>\n')
return ''.join(result)
def as_html_vobtest(self, config_name, slave_name):
""" Return the HTML representation of the VOB product tests as a string.
Some logic is included.
"""
result = []
header = ('Product/Action', '`compiler -s\'', '`compiler\'', '`make\'', '`make run\'\n')
for rt, rt_data in self._vobtest.iteritems():
(stamp_begin, stamp_end, vobtest_results) = rt_data
any_failure = False
result_tmp = []
for kind, products in vobtest_results.iteritems():
if not len(products) > 0:
continue
body = []
for product in products:
for name, name_data in product.iteritems():
row = [name]
if not isinstance(name_data, types.DictType):
row.extend(['Unavailable'] * (len(header) - 1))
body.append(row)
any_failure = True
else:
action_order = {'semantic':1, 'translate':2, 'compile':3, 'run':4}
row.extend([''] * len(action_order.keys()))
for action, action_data in name_data.iteritems():
if not action in action_order.keys():
self._logger.error('Unknown action `%s\' while preparing ' \
'the HTML output' % action)
continue
action_index = action_order[action]
if not isinstance(action_data, types.TupleType):
row[action_index] = 'Disabled'
else:
(ret_val, output_files, stdout, stderr) = action_data
row[action_index] = (ret_val and '*Failure*' or 'Success')
if ret_val:
any_failure = True
body.append(row)
title = 'Results for %d `%s\' products using the %s runtime:' \
% (len(products), kind, (rt == 'rt2' and 'function-test' \
or 'load-test'))
result_tmp.append('%s\n%s\n' % (title, '-' * len(title)))
result_tmp.append(self.as_txt_table(header, body) + '\n')
result.append('<span class="%s">VOB product results for the %s ' \
'runtime</span><br/><br/>\n' \
% ((any_failure and 'error_header' or 'header'), \
(rt == 'rt2' and 'function-test' or 'load-test')))
result.append('( `<a href="products/">make all</a>\' )<br/><br/>\n')
result.append('<span class="stamp">%s - %s [%s]</span>\n' \
% (stamp_begin, stamp_end, \
utils.get_time_diff(False, stamp_begin, stamp_end)))
result.append('<pre>\n')
result.extend(result_tmp)
result.append('</pre>\n')
return ''.join(result)
def as_html(self, stamp_old, stamp_new, config, config_name, slave_name):
""" Return the HTML representation of all test results of the given slave
as a string.
"""
result = [
'<?xml version="1.0" encoding="ISO8859-1"?>\n' \
'<html>\n' \
'<head>\n' \
'<meta http-equiv="content-type" content="text/html; charset=ISO8859-1"/>\n' \
'<link rel="stylesheet" type="text/css" href="../../index.css"/>\n' \
'<title>Shouldn\'t matter...</title>\n' \
'</head>\n' \
'<body>\n'
]
uname_out = utils.run_cmd('uname -srmp')[1]
gcc_out = filter(lambda v: v.find(' ver') > 0, utils.run_cmd('%s -v' % (('cc' in config and len(config['cc']) > 0) and config['cc'] or 'gcc'))[2])
result.append('<pre>\nPlatform: %s\nGCC/LLVM version: %s</pre>\n\n' \
% (uname_out[0].strip(), gcc_out[0].strip()))
if self._titan:
result.append(self.as_html_titan(config_name, slave_name))
if self._regtest:
result.append(self.as_html_regtest(config_name, slave_name))
if self._perftest:
result.append(self.as_html_perftest(config_name, slave_name))
if self._eclipse:
result.append(self.as_html_eclipse(config_name, slave_name))
if self._functest:
result.append(self.as_html_functest(config_name, slave_name))
if self._vobtest:
result.append(self.as_html_vobtest(config_name, slave_name))
result += [
'</body>\n' \
'</html>\n'
]
return ''.join(result)
def publish_csv2email(self, build_start, build_end, email_file, \
slave_list, build_root, configs, reset):
""" Assemble a compact e-mail message from the CSV data provided by each
slave in the current build. The assembled e-mail message is written
to a file. It's ready to send. It's called by the master.
Arguments:
build_start: Start of the whole build for all slaves.
build_end: End of the whole build for all slaves.
email_file: Store the e-mail message here.
slave_list: Slaves processed.
build_root: The actual build directory.
configs: All configurations.
reset: Reset statistics.
"""
email_header = 'Full build time:\n----------------\n\n%s <-> %s\n\n' \
% (build_start, build_end)
email_footer = 'For more detailed results, please visit:\n' \
'http://ttcn.ericsson.se/titan-testresults/titan_builds or\n' \
'http://ttcn.ericsson.se/titan-testresults/titan_builds/%s.\n\n' \
'You\'re receiving this e-mail, because you\'re ' \
'subscribed to daily TITAN build\nresults. If you want ' \
'to unsubscribe, please reply to this e-mail. If you\n' \
'received this e-mail by accident please report that ' \
'too. Thank you.\n' % build_root
email_matrix = 'The result matrix:\n------------------\n\n'
header = ('Slave/Action', 'TITAN build', 'Reg. tests', 'Perf. tests', \
'Func. tests', 'VOB tests', 'Eclipse build') # It's long without abbrevs.
rows = []
slave_names = []
stat_handler = None
for slave in slave_list:
(slave_name, config_name, is_localhost) = slave
slave_names.append(config_name)
csv_file_name = '%s/%s/report.csv' \
% (self._config.common['logdir'], config_name)
if 'measure' in configs[config_name] and configs[config_name]['measure']:
stat_handler = StatHandler(self._logger, self._config.common, configs, slave_list, reset)
if not os.path.isfile(csv_file_name):
self._logger.error('It seems that we\'ve lost `%s\' for configuration `%s\'' % (slave_name, config_name))
local_row = [slave_name]
local_row.extend(['Lost'] * (len(header) - 1))
rows.append(local_row)
if stat_handler:
stat_handler.lost(config_name)
continue
csv_file = open(csv_file_name, 'rt')
csv_data = csv_file.readlines()
csv_file.close()
if len(csv_data) != 1:
self._logger.error('Error while processing `%s/%s/report.csv\' at ' \
'the end, skipping slave' \
% (self._config.common['logdir'], config_name))
else:
csv_data = csv_data[0].split(',')
local_row = [csv_data[4]] # Should be `config_name'.
if stat_handler:
stat_handler.disabled_success_failure(config_name, csv_data[6:])
for result in csv_data[6:]:
if int(result) == -1:
local_row.append('Disabled')
elif int(result) == 0:
local_row.append('Success')
elif int(result) == 1:
local_row.append('*Failure*')
rows.append(local_row)
email_matrix += '%s\n' % self.as_txt_table(header, rows)
file = open(email_file, 'wt')
file.write(email_header)
if stat_handler:
file.write(str(stat_handler))
file.write(email_matrix)
file.write(email_footer)
file.close()
def backup_logs(self):
""" Handle archiving and backup activities.
Returns:
A dictionary with None values.
"""
archived_builds = {}
for file in os.listdir(self._config.common['htmldir']):
if os.path.isdir('%s/%s' % (self._config.common['htmldir'], file)):
matched_dir = re.search('(\d{8}_\d{6})', file)
if not matched_dir:
continue
diff_in_days = utils.diff_in_days(matched_dir.group(1), utils.get_time(True))
if diff_in_days > self._config.common['archive']:
self._logger.debug('Archiving logs for build `%s\'' % matched_dir.group(1))
utils.run_cmd('cd %s && tar cf %s.tar %s' \
% (self._config.common['htmldir'], \
matched_dir.group(1), matched_dir.group(1)), None, 1800)
utils.run_cmd('bzip2 %s/%s.tar && rm -rf %s/%s' \
% (self._config.common['htmldir'], matched_dir.group(1), \
self._config.common['htmldir'], matched_dir.group(1)), None, 1800)
archived_builds[matched_dir.group(1)] = None
else:
matched_archive = re.search('(\d{8}_\d{6}).tar.bz2', file)
if not matched_archive:
continue
diff_in_days = utils.diff_in_days(matched_archive.group(1), utils.get_time(True))
if 'cleanup' in self._config.common and 'cleanupslave' in self._config.common and \
diff_in_days > self._config.common['cleanup']:
slave_name = self._config.common['cleanupslave']['slave']
if slave_name in self._config.slaves:
slave = self._config.slaves[slave_name]
slave_url = '%s@%s' % (slave['user'], slave['ip'])
utils.run_cmd('ssh %s \'mkdir -p %s\'' \
% (slave_url, self._config.common['cleanupslave']['dir']))
(ret_val_scp, stdout_scp, stderr_scp) = \
utils.run_cmd('scp %s/%s %s:%s' \
% (self._config.common['htmldir'], file, slave_url, \
self._config.common['cleanupslave']['dir']))
if not ret_val_scp:
utils.run_cmd('rm -f %s/%s' % (self._config.common['htmldir'], file))
continue
else:
self._logger.error('Slave with name `%s\' cannot be found in ' \
'the slaves\' list' % slave_name)
archived_builds[matched_archive.group(1)] = None
return archived_builds
def strip_tags(self, text):
""" Replace all '<', '>' etc. characters with their HTML equivalents. """
return text.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
def publish_html(self, build_root):
""" Create basic HTML output from the published directory structure. It
should be regenerated after every build. The .css file is generated
from here as well. No external files used. It is responsible for
publishing in general.
Arguments:
build_root: The actual build directory.
"""
self.generate_css()
html_index = os.path.join(self._config.common['htmldir'], 'index.html')
html_menu = os.path.join(self._config.common['htmldir'], 'menu.html')
index_file = open(html_index, 'wt')
index_file.write(
'<?xml version="1.0" encoding="ISO8859-1"?>\n' \
'<html>\n' \
'<head>\n' \
'<meta http-equiv="content-type" content="text/html; charset=ISO8859-1"/>\n' \
'<link rel="stylesheet" type="text/css" href="index.css"/>\n' \
'<title>Build results (Updated: %s)</title>\n' \
'</head>\n' \
'<frameset cols="285,*">\n' \
'<frame src="menu.html" name="menu"/>\n' \
'<frame src="%s/report.txt" name="contents"/>\n' \
'</frameset>\n' \
'</html>\n' % (build_root, build_root))
index_file.close()
menu_file = open(html_menu, 'wt')
menu_contents_dict = self.backup_logs()
for root, dirs, files in os.walk(self._config.common['htmldir']):
build_match = re.match('(\d{8}_\d{6})', root.split('/')[-1])
if build_match:
dirs.sort()
dirs_list = ['<li><a href="%s/%s/report.html" target="contents">%s' \
'</a></li>\n' % (build_match.group(1), elem, elem) for elem in dirs]
menu_contents_dict[build_match.group(1)] = dirs_list
sorted_keys = menu_contents_dict.keys()
sorted_keys.sort(reverse = True)
menu_contents = ''
bg_toggler = False
for build in sorted_keys:
build_data = menu_contents_dict[build]
if build_data:
menu_contents += \
'<tr>\n' \
'<td bgcolor="%s">\nBuild #: <b>' \
'<a href="%s/report.txt" target="contents">%s</a></b>\n' \
'<ul>\n%s</ul>\n' \
'</td>\n' \
'</tr>\n' % ((bg_toggler and '#a9c9e1' or '#ffffff'), build, \
build, ''.join(build_data))
bg_toggler = not bg_toggler
else:
menu_contents += \
'<tr>\n' \
'<td bgcolor="#c1c1ba">\nBuild #: <b>' \
'<a href="%s.tar.bz2" target="contents">%s</a> (A)</b>\n' \
'</td>\n' \
'</tr>\n' % (build, build)
menu_file.write(
'<?xml version="1.0" encoding="ISO8859-1"?>\n' \
'<html>\n' \
'<head>\n' \
'<meta http-equiv="content-type" content="text/html; charset=ISO8859-1"/>\n' \
'<link rel="stylesheet" type="text/css" href="index.css"/>' \
'<title>Shouldn\'t matter...</title>\n' \
'</head>\n' \
'<body>\n<pre>\n' \
' _\n'
' ____( )___________\n'
'/_ _/ /_ _/ \ \\\n'
' /_//_/ /_//_/\_\_\_\\\n'
'</pre>\n'
'<table class="Menu">\n' \
'%s\n' \
'</table>\n' \
'</body>\n' \
'</html>\n' % menu_contents)
menu_file.close()
self._plotter.collect_data()
self._plotter.plot(build_root)
def generate_css(self):
css_file = file('%s/index.css' % self._config.common['htmldir'], 'wt')
css_file.write(
'body, td {\n' \
' font-family: Verdana, Cursor;\n' \
' font-size: 10px;\n' \
' font-weight: bold;\n' \
'}\n\n' \
'table {\n' \
' border-spacing: 1px 1px;\n' \
'}\n\n' \
'table td {\n' \
' padding: 8px 4px 8px 4px;\n' \
'}\n\n' \
'table.Menu td {\n' \
' border: 1px gray solid;\n' \
' text-align: left;\n' \
' width: 160px;\n' \
'}\n\n' \
'pre {\n' \
' font-size: 11px;\n' \
' font-weight: normal;\n' \
'}\n\n'
'a:link,a:visited,a:active {\n' \
' color: #00f;\n' \
'}\n\n'
'a:hover {\n' \
' color: #444;\n' \
'}\n\n' \
'.error_header {\n' \
' font-weight: bold;\n' \
' font-size: 18px;\n' \
' color: #f00;\n' \
'}\n\n' \
'.header {\n' \
' font-weight: bold;\n' \
' font-size: 18px;\n' \
' color: #000;\n' \
'}\n\n' \
'.stamp {\n' \
' font-size: 11px;\n' \
'}\n'
)
css_file.close()
class plotter:
def __init__(self, logger, config):
self._logger = logger
self._config = config
self._htmldir = self._config.common.get('htmldir', '')
self._stats = {}
def collect_data(self):
self._logger.debug('Collecting statistical data for plotting to `%s\'' % self._htmldir)
dirs_to_check = [dir for dir in os.listdir(self._htmldir) \
if os.path.isdir(os.path.join(self._htmldir, dir)) \
and re.match('(\d{8}_\d{6})', dir)]
dirs_to_check.sort()
for dir in dirs_to_check:
date = '%s-%s-%s' % (dir[0:4], dir[4:6], dir[6:8])
date_dir = os.path.join(self._htmldir, dir)
platforms = [platform for platform in os.listdir(date_dir) \
if os.path.isdir(os.path.join(date_dir, platform))]
for platform in platforms:
platform_dir = os.path.join(date_dir, platform)
files = os.listdir(platform_dir)
files.sort()
stat_files = [file for file in files if 'perftest-stats' in file and file.endswith('csv')]
if len(stat_files) > 0 and len(stat_files) <= 2:
for file in stat_files:
rt = 'rt2' in file and 'rt2' or 'rt1'
if not rt in self._stats:
self._stats[rt] = {}
if not platform in self._stats[rt]:
self._stats[rt][platform] = []
file = open(os.path.join(platform_dir, file), 'rt')
for line in file:
dates_in = [d[0] for d in self._stats[rt][platform]]
if not line.split(',')[0] in dates_in:
self._stats[rt][platform].append(line.split(','))
file.close()
else:
data_rt1 = [date]
data_rt2 = [date]
for file in files:
rt = 'rt2' in file and 'rt2' or 'rt1'
if not rt in self._stats:
self._stats[rt] = {}
if not platform in self._stats[rt]:
self._stats[rt][platform] = []
if re.match('perftest\.rt\d{1}\-\d+', file):
file = open(os.path.join(platform_dir, file), 'rt')
for line in file:
if re.search('=>>>Entities/Time', line):
if rt == 'rt1':
data_rt1.extend(line.split()[-5:-1])
else:
data_rt2.extend(line.split()[-5:-1])
break
file.close()
if len(data_rt1) > 1:
dates_in = [d[0] for d in self._stats['rt1'][platform]]
if not data_rt1[0] in dates_in:
self._stats['rt1'][platform].append(data_rt1)
if len(data_rt2) > 1:
dates_in = [d[0] for d in self._stats['rt2'][platform]]
if not data_rt2[0] in dates_in:
self._stats['rt2'][platform].append(data_rt2)
def plot(self, build_dir):
self._logger.debug('Plotting collected statistical data')
for runtime, runtime_data in self._stats.iteritems():
for config_name, config_data in runtime_data.iteritems():
target_dir = os.path.join(os.path.join(self._htmldir, build_dir), config_name)
if len(config_data) < 1 or not os.path.isdir(target_dir):
continue
csv_file_name = os.path.join(target_dir, 'perftest-stats-%s.csv-tmp' % runtime)
cfg_file_name = os.path.join(target_dir, 'perftest-stats-%s.cfg' % runtime)
csv_file = open(csv_file_name, 'wt')
cfg_file = open(cfg_file_name, 'wt')
youngest = config_data[0][0]
oldest = config_data[0][0]
for line in config_data:
if line[0] < oldest:
oldest = line[0]
if line[0] > youngest:
youngest = line[0]
csv_file.write('%s\n' % ','.join(line).strip())
csv_file.close()
# `gnuplot' requires it to be sorted...
utils.run_cmd('cat %s | sort >%s' % (csv_file_name, csv_file_name[0:-4]))
utils.run_cmd('rm -f %s' % csv_file_name)
csv_file_name = csv_file_name[0:-4]
config = self._config.configs.get(config_name, {})
cps_min = config.get('cpsmin', 1000)
cps_max = config.get('cpsmax', 2000)
cps_diff = abs(cps_max - cps_min) / 5
cfg_file.write( \
'set title "TITANSim CPS Statistics with LGenBase\\n(%d-%d CPS on \\`%s\\\')"\n' \
'set datafile separator ","\n' \
'set xlabel "Date"\n' \
'set xdata time\n' \
'set timefmt "%%Y-%%m-%%d"\n' \
'set xrange ["%s":"%s"]\n' \
'set format x "%%b %%d\\n%%Y"\n' \
'set ylabel "CPS"\n' \
'set terminal svg size 640, 480\n' \
'set grid\n' \
'set key right bottom\n' \
'set key spacing 1\n' \
'set key box\n' \
'set output "%s/perftest-stats-%s.svg"\n' \
'plot "%s" using 1:5 title "%d CPS" with linespoints, \\\n' \
'"%s" using 1:9 title "%d CPS" with linespoints, \\\n' \
'"%s" using 1:13 title "%d CPS" with linespoints, \\\n' \
'"%s" using 1:17 title "%d CPS" with linespoints, \\\n' \
'"%s" using 1:21 title "%d CPS" with linespoints, \\\n' \
'"%s" using 1:25 title "%d CPS" with linespoints\n' \
% (cps_min, cps_max, config_name, oldest, youngest, target_dir,
runtime, csv_file_name, cps_min, csv_file_name,
cps_min + cps_diff, csv_file_name, cps_min + 2 * cps_diff,
csv_file_name, cps_min + 3 * cps_diff, csv_file_name,
cps_min + 4 * cps_diff, csv_file_name, cps_max))
cfg_file.close()
utils.run_cmd('gnuplot %s' % cfg_file_name)
class StatHandler:
""" The implementation of this class is based on the format of `result.txt'.
"""
def __init__(self, logger, common_configs, configs, slave_list, reset):
self._logger = logger
self._configs = configs
self._common_configs = common_configs
self._html_root = self._common_configs.get('htmldir')
self._configs_to_support = []
self._first_period_started = None
self._period_started = None
self._overall_score = 0
self._overall_score_all = 0
self._period_score = 0
self._period_score_all = 0
for slave in slave_list: # Prepare list of active configurations.
(slave_name, config_name, is_localhost) = slave
if not self.is_weekend_or_holiday() and config_name in self._configs and 'measure' in self._configs[config_name] and self._configs[config_name]['measure']:
self._configs_to_support.append(config_name)
# Scan and parse the latest `report.txt' file.
dirs_to_check = [dir for dir in os.listdir(self._html_root) if os.path.isdir(os.path.join(self._html_root, dir)) and re.match('(\d{8}_\d{6})', dir)]
dirs_to_check.sort()
dirs_to_check.reverse()
for dir in dirs_to_check:
report_txt_path = os.path.join(self._html_root, os.path.join(dir, 'report.txt'))
if os.path.isfile(report_txt_path):
report_txt = open(report_txt_path, 'rt')
for line in report_txt:
first_period_line_matched = re.search('^First period.*(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}).*', line)
overall_score_line_matched = re.search('^Overall score.*(\d+)/(\d+).*', line)
period_started_line_matched = re.search('^This period.*(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}).*', line)
period_score_line_matched = re.search('^Period score.*(\d+)/(\d+).*', line)
if first_period_line_matched:
self._first_period_started = first_period_line_matched.group(1)
elif overall_score_line_matched:
self._overall_score = int(overall_score_line_matched.group(1))
self._overall_score_all = int(overall_score_line_matched.group(2))
elif period_started_line_matched:
self._period_started = period_started_line_matched.group(1)
elif period_score_line_matched:
self._period_score = int(period_score_line_matched.group(1))
self._period_score_all = int(period_score_line_matched.group(2))
report_txt.close()
if self._first_period_started is None or self._period_started is None \
or self._overall_score is None or self._overall_score_all is None \
or self._period_score is None or self._period_score_all is None:
self._logger.debug('Something is wrong with the report file `%s\'' \
% report_txt_path)
continue
self._logger.debug('Using report file `%s\'' % report_txt_path)
break
if not self.is_weekend_or_holiday():
self._overall_score_all += (2 * len(self._configs_to_support))
self._period_score_all += (2 * len(self._configs_to_support))
if not self._first_period_started:
self._first_period_started = utils.get_time()
if not self._period_started:
self._period_started = utils.get_time()
if reset or int(utils.get_time_diff(False, self._period_started, utils.get_time(), True)[0]) / 24 >= self._common_configs.get('measureperiod', 30):
self._period_started = utils.get_time()
self._period_score = self._period_score_all = 0
def is_weekend_or_holiday(self):
""" Weekends or any special holidays to ignore. """
ignore = int(time.strftime('%w')) == 0 or int(time.strftime('%w')) == 6
if not ignore:
holidays = ((1, 1), (3, 15), (5, 1), (8, 20), (10, 23), (11, 1), (12, 25), (12, 26))
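            # (fixed-date holidays only; the list appears to correspond to the
            # Hungarian public holidays, e.g. Mar 15, Aug 20 and Oct 23)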
month = int(time.strftime('%m'))
day = int(time.strftime('%d'))
for holiday in holidays:
if (month, day) == holiday:
ignore = True
break
return ignore
def lost(self, config_name):
if not config_name in self._configs_to_support:
return
self._overall_score += 1
self._period_score += 1
def disabled_success_failure(self, config_name, results):
""" `results' is coming from the CSV file. """
if not config_name in self._configs_to_support:
return
titan = int(results[0])
regtest = int(results[1])
perftest = int(results[2]) # Not counted.
functest = int(results[3])
        # Nothing to do here, apart from a warning when mandatory tests were disabled.
if titan == -1 or regtest == -1 or functest == -1:
self._logger.warning('Mandatory tests were disabled for build '
'configuration `%s\', the generated statistics ' \
'may be false, check it out' % config_name)
if titan == 0 and regtest == 0 and functest == 0:
self._overall_score += 2
self._period_score += 2
def percent(self, score, score_all):
try:
ret_val = (float(score) / float(score_all)) * 100.0
        except (ZeroDivisionError, ValueError, TypeError):
            return 0.0
        return ret_val
def buzzword(self, percent):
if percent > 80.0: return 'Stretched'
elif percent > 70.0: return 'Commitment'
elif percent > 60.0: return 'Robust'
else: return 'Unimaginable'
def __str__(self):
if len(self._configs_to_support) == 0:
return ''
overall_percent = self.percent(self._overall_score, self._overall_score_all)
period_percent = self.percent(self._period_score, self._period_score_all)
ret_val = 'Statistics:\n-----------\n\n' \
'Configurations: %s\n' \
'First period: %s\n' \
'Overall score: %d/%d (%.2f%%) %s\n' \
'This period: %s\n' \
'Period score: %d/%d (%.2f%%) %s\n\n' \
% (', '.join(self._configs_to_support), self._first_period_started, self._overall_score, self._overall_score_all,
overall_percent, self.buzzword(overall_percent), self._period_started,
self._period_score, self._period_score_all, period_percent, self.buzzword(period_percent))
return ret_val
|
BenceJanosSzabo/titan.core
|
etc/autotest/titan_publisher.py
|
Python
|
epl-1.0
| 58,576
|
[
"VisIt"
] |
f9dcd01076aeadb1532d17b7fa0efc29a7caaa17d6728a5a6c06dc730a7e86f2
|
# Copyright (C) 2004-2011 Jakob Schiotz and Center for Individual
# Nanoparticle Functionality, Department of Physics, Technical
# University of Denmark. Email: schiotz@fysik.dtu.dk
#
# This file is part of Asap version 3.
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# version 3 as published by the Free Software Foundation. Permission
# to use other versions of the GNU Lesser General Public License may
# be granted by Jakob Schiotz or the head of department of the
# Department of Physics, Technical University of Denmark, as
# described in section 14 of the GNU General Public License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# and the GNU Lesser Public License along with this program. If not,
# see <http://www.gnu.org/licenses/>.
"""The Asap module.
The following **classes** are defined in this module:
`ListOfAtoms`: A list of atoms object used for the simulations.
`ParallelAtoms`: A parallel version of `ListOfAtoms`. Only defined if
parallel simulations are possible. Should not be created directly,
use `MakeParallelAtoms`.
`EMT`: The Effective Medium Theory potential.
`MonteCarloEMT`: EMT potential with optimizations for Monte Carlo simulations.
`MoPotential`: An experimental molybdenum potential.
`LennardJones`: Calculator using Lennard-Jones potential.
`Morse`: Calculator using Morse potential.
`BrennerPotential`: Calculates the Brenner potential.
The following **functions** are defined:
`Verbose`: Changes the verbosity level of the C code.
`CNA`: Runs Common Neighbor Analysis on a (Parallel)ListOfAtoms object.
`CoordinationNumbers`: Calculates coordination numbers for the atoms
in a (Parallel)ListOfAtoms object.
`MakeParallelAtoms`: A factory function creating a ParallelAtoms
object safely. Only defined if parallel simulations are possible.
This module detects if parallel simulations are possible, and then
loads the serial or the parallel version of the C module into memory.
If parallel simulations are possible, a Python exit function is set,
so MPI is shut down if one of the processes exits.
*Note on the automatic documentation*: Most of the classes mentioned
above are not listed below. This is because the way epydoc_ works
interacts badly with the structure of these modules. Click on
class/function name above to see the documentation.
.. _epydoc: http://epydoc.sf.net
"""
__docformat__ = "restructuredtext en"
import sys, os
from asap3.version import __version__
from asap3.Internal.Builtins import _asap, parallelpossible, AsapError
from asap3.Internal.UtilityFunctions import print_version, get_version, \
get_short_version, DebugOutput, memory_usage, print_memory
from asap3.Internal.BuiltinPotentials import EMT, MonteCarloEMT, \
EMTParameters, EMTDefaultParameters, EMTRasmussenParameters, \
LennardJones, BrennerPotential, Morse, EMT2013, EMT2011, RGL, Gupta
from asap3.Internal.EMTParameters import EMTStandardParameters, \
EMThcpParameters, EMTMetalGlassParameters
from asap3.Internal.Threads import AsapThreads
from asap3.Internal.MonteCarloAtoms import MonteCarloAtoms
from asap3.Internal.checkversion import check_version
from asap3.analysis.localstructure import CNA, CoordinationNumbers, FullCNA
from asap3.io.trajectory import PickleTrajectory
from asap3.io.bundletrajectory import BundleTrajectory
from asap3.md.verlet import VelocityVerlet
from asap3.md.langevin import Langevin
from asap3.md import MDLogger
if parallelpossible:
from asap3.Internal.ParallelListOfAtoms import ParallelAtoms, \
MakeParallelAtoms
from asap3.Internal.Collector import Collector
from ase.parallel import paropen
# OpenKIM may or may not be built into Asap
from asap3.Internal.BuiltinPotentials import OpenKIMsupported
if OpenKIMsupported:
from asap3.Internal.OpenKIMcalculator import OpenKIMcalculator, OpenKIMinfo
import ase
from ase import Atoms
from ase.visualize import view
import ase.units as units
from ase.md.velocitydistribution import MaxwellBoltzmannDistribution, Stationary
NeighborList = _asap.NeighborList
NeighborCellLocator = _asap.NeighborCellLocator
FullNeighborList = _asap.FullNeighborList
set_verbose = _asap.set_verbose
heap_mallinfo = _asap.heap_mallinfo
# asap3.fixepydoc.fix(ListOfAtoms, EMT, MoPotential, BrennerPotential, LJPotential, EMTDefaultParameterProvider,
# EMTRasmussenParameterProvider, EMTVariableParameterProvider,
# EMThcpParameterProvider, NeighborList)
# if parallelpossible:
# Asap.fixepydoc.fix(ParallelAtoms, MakeParallelAtoms)
#timeunit = 1.018047e-14 # Seconds
#femtosecond = 1e-15 / timeunit # Femtosecond in atomic units
#eV_per_cubic_angstrom = 1.60219e11 # Stress unit in Pascal
#gigapascal = 1e9 / eV_per_cubic_angstrom # GPa in atomic units
#kB = 8.61734e-5 # Boltzmann's constant in eV/Kelvin
# Set the default verbosity level to 0
#Verbose(0)
# Check Asap installation for consistency
check_version()
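# A minimal usage sketch (a hedged illustration only, not executed on import;
# the lattice builder comes from ASE and its import path may differ between
# ASE versions):
#
#   from ase.lattice.cubic import FaceCenteredCubic
#   from asap3 import EMT
#   atoms = FaceCenteredCubic('Cu', size=(4, 4, 4))
#   atoms.set_calculator(EMT())
#   print(atoms.get_potential_energy())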
|
auag92/n2dm
|
Asap-3.8.4/Python/asap3/__init__.py
|
Python
|
mit
| 5,307
|
[
"ASE",
"OpenKIM"
] |
905d7b6ef15ed32a12a5139cbdb27aacd375dc4599cca8bbd9766a7c85c69983
|
"""
@name: Modules/House/_test/test_house.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2013-2020 by D. Brian Kimmel
@license: MIT License
@note: Created on Apr 8, 2013
@summary: Test handling the information for a house.
Passed all 11 tests - DBK - 2020-01-27
"""
from Modules.House import HouseInformation
__updated__ = '2020-02-02'
# Import system type stuff
from twisted.trial import unittest
from ruamel.yaml import YAML
# Import PyMh files and modules.
from _test.testing_mixin import SetupPyHouseObj
from Modules.House.house import Api as houseApi
from Modules.Core.Utilities.debug_tools import PrettyFormatAny
TEST_YAML = """\
House:
Name: PinkPoppy
Modules: # Uncomment to use module.
- Family
- Entertainment
# - HVAC
# - Irrigation
- Lighting
# - Pool
# - Rules
- Scheduling
# - Security
# - Sync
"""
class SetupMixin(object):
def setUp(self):
self.m_pyhouse_obj = SetupPyHouseObj().BuildPyHouseObj()
l_yaml = YAML()
self.m_test_config = l_yaml.load(TEST_YAML)
class A0(unittest.TestCase):
def test_00_Print(self):
_x = PrettyFormatAny.form('_test', 'title', 190) # so it is defined when printing is cleaned up.
print('Id: test_house')
class A1_Setup(SetupMixin, unittest.TestCase):
"""
    This section verifies that the test PyHouse object is built correctly and that
    its House and Location members are present.
"""
def setUp(self):
SetupMixin.setUp(self)
def test_01_PyHouse(self):
print(PrettyFormatAny.form(self.m_pyhouse_obj, 'A1-01-A - PyHouse'))
self.assertIsNotNone(self.m_pyhouse_obj)
def test_02_House(self):
print(PrettyFormatAny.form(self.m_pyhouse_obj.House, 'A1-02-A - House'))
self.assertIsNotNone(self.m_pyhouse_obj.House)
self.assertIsInstance(self.m_pyhouse_obj.House, HouseInformation)
def test_03_Location(self):
print(PrettyFormatAny.form(self.m_pyhouse_obj.House.Location, 'A1-03-A - Location'))
self.assertIsNotNone(self.m_pyhouse_obj.House.Location)
class C1_Read(SetupMixin, unittest.TestCase):
"""
This section tests the reading of the config used by house.
"""
def setUp(self):
SetupMixin.setUp(self)
def test_01_Load(self):
"""
"""
class C2_Write(SetupMixin, unittest.TestCase):
"""
    This section tests the writing of the config used by house.
"""
def setUp(self):
SetupMixin.setUp(self)
class P1_Api(SetupMixin, unittest.TestCase):
""" Test the major Api functions
"""
def setUp(self):
SetupMixin.setUp(self)
self.m_api = houseApi(self.m_pyhouse_obj)
def test_01_Init(self):
""" Create a JSON object for Location.
"""
# print(PrettyFormatAny.form(self.m_api, 'P1-01-A - Api'))
pass
def test_02_Load(self):
"""
"""
# print(PrettyFormatAny.form(l_xml, 'P1-02-A - Api'))
def test_03_Start(self):
pass
def test_04_SaveXml(self):
"""
"""
# self.m_api.LoadConfig()
# print(PrettyFormatAny.form(self.m_pyhouse_obj.House, 'P1-04-A - House'))
# print(PrettyFormatAny.form(self.m_pyhouse_obj._Families, 'P1-04-B - House'))
# print(PrettyFormatAny.form(l_xml, 'P1-04-D - Api'))
# ## END DBK
|
DBrianKimmel/PyHouse
|
Project/src/Modules/House/_test/test_house.py
|
Python
|
mit
| 3,437
|
[
"Brian"
] |
8bfe0f5c7bacddbe77452ead2550182651e43ea35b63b7db8e48fae47cef0595
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import collections
import logging
import operator
import unittest
from nose_parameterized import parameterized
import datetime
import pytz
import itertools
from six.moves import range, zip
import zipline.utils.factory as factory
import zipline.finance.performance as perf
from zipline.finance.slippage import Transaction, create_transaction
import zipline.utils.math_utils as zp_math
from zipline.gens.composites import date_sorted_sources
from zipline.finance.trading import SimulationParameters
from zipline.finance.blotter import Order
from zipline.finance.commission import PerShare, PerTrade, PerDollar
from zipline.finance import trading
from zipline.protocol import DATASOURCE_TYPE
from zipline.utils.factory import create_random_simulation_parameters
import zipline.protocol
from zipline.protocol import Event
logger = logging.getLogger('Test Perf Tracking')
onesec = datetime.timedelta(seconds=1)
oneday = datetime.timedelta(days=1)
tradingday = datetime.timedelta(hours=6, minutes=30)
def create_txn(event, price, amount):
mock_order = Order(None, None, event.sid, id=None)
txn = create_transaction(event, mock_order, price, amount)
txn.source_id = 'MockTransactionSource'
return txn
def benchmark_events_in_range(sim_params):
return [
Event({'dt': dt,
'returns': ret,
'type':
zipline.protocol.DATASOURCE_TYPE.BENCHMARK,
'source_id': 'benchmarks'})
for dt, ret in trading.environment.benchmark_returns.iterkv()
if dt.date() >= sim_params.period_start.date()
and dt.date() <= sim_params.period_end.date()
]
def calculate_results(host, events):
perf_tracker = perf.PerformanceTracker(host.sim_params)
events = sorted(events, key=lambda ev: ev.dt)
all_events = date_sorted_sources(events, host.benchmark_events)
filtered_events = (filt_event for filt_event in all_events
if filt_event.dt <= events[-1].dt)
grouped_events = itertools.groupby(filtered_events, lambda x: x.dt)
results = []
bm_updated = False
for date, group in grouped_events:
for event in group:
perf_tracker.process_event(event)
if event.type == DATASOURCE_TYPE.BENCHMARK:
bm_updated = True
if bm_updated:
msg = perf_tracker.handle_market_close()
results.append(msg)
bm_updated = False
return results
class TestSplitPerformance(unittest.TestCase):
def setUp(self):
self.sim_params, self.dt, self.end_dt = \
create_random_simulation_parameters()
# start with $10,000
self.sim_params.capital_base = 10e3
self.benchmark_events = benchmark_events_in_range(self.sim_params)
def test_split_long_position(self):
with trading.TradingEnvironment() as env:
events = factory.create_trade_history(
1,
[20, 20],
[100, 100],
oneday,
self.sim_params
)
# set up a long position in sid 1
# 100 shares at $20 apiece = $2000 position
events.insert(0, create_txn(events[0], 20, 100))
# set up a split with ratio 3
events.append(factory.create_split(1, 3,
env.next_trading_day(events[1].dt)))
results = calculate_results(self, events)
# should have 33 shares (at $60 apiece) and $20 in cash
self.assertEqual(2, len(results))
latest_positions = results[1]['daily_perf']['positions']
self.assertEqual(1, len(latest_positions))
# check the last position to make sure it's been updated
position = latest_positions[0]
self.assertEqual(1, position['sid'])
self.assertEqual(33, position['amount'])
self.assertEqual(60, position['cost_basis'])
self.assertEqual(60, position['last_sale_price'])
# since we started with $10000, and we spent $2000 on the
# position, but then got $20 back, we should have $8020
# (or close to it) in cash.
# we won't get exactly 8020 because sometimes a split is
# denoted as a ratio like 0.3333, and we lose some digits
# of precision. thus, make sure we're pretty close.
daily_perf = results[1]['daily_perf']
self.assertTrue(
zp_math.tolerant_equals(8020,
daily_perf['ending_cash'], 1))
for i, result in enumerate(results):
for perf_kind in ('daily_perf', 'cumulative_perf'):
perf_result = result[perf_kind]
# prices aren't changing, so pnl and returns should be 0.0
self.assertEqual(0.0, perf_result['pnl'],
"day %s %s pnl %s instead of 0.0" %
(i, perf_kind, perf_result['pnl']))
self.assertEqual(0.0, perf_result['returns'],
"day %s %s returns %s instead of 0.0" %
(i, perf_kind, perf_result['returns']))
class TestCommissionEvents(unittest.TestCase):
def setUp(self):
self.sim_params, self.dt, self.end_dt = \
create_random_simulation_parameters()
logger.info("sim_params: %s, dt: %s, end_dt: %s" %
(self.sim_params, self.dt, self.end_dt))
self.sim_params.capital_base = 10e3
self.benchmark_events = benchmark_events_in_range(self.sim_params)
def test_commission_event(self):
with trading.TradingEnvironment():
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params
)
# Test commission models and validate result
# Expected commission amounts:
# PerShare commission: 1.00, 1.00, 1.50 = $3.50
# PerTrade commission: 5.00, 5.00, 5.00 = $15.00
# PerDollar commission: 1.50, 3.00, 4.50 = $9.00
# Total commission = $3.50 + $15.00 + $9.00 = $27.50
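            # One way to read the expected amounts listed above (per trade of
            # 50 / 100 / 150 shares at $20):
            #   PerShare(0.01, min 1.00): max(0.50, 1.00) + 1.00 + 1.50 = 3.50
            #   PerTrade(5.00):           3 * 5.00                      = 15.00
            #   PerDollar(0.0015):        1.50 + 3.00 + 4.50            = 9.00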
# Create 3 transactions: 50, 100, 150 shares traded @ $20
transactions = [create_txn(events[0], 20, i)
for i in [50, 100, 150]]
# Create commission models
models = [PerShare(cost=0.01, min_trade_cost=1.00),
PerTrade(cost=5.00),
PerDollar(cost=0.0015)]
# Aggregate commission amounts
total_commission = 0
for model in models:
for trade in transactions:
total_commission += model.calculate(trade)[1]
self.assertEqual(total_commission, 27.5)
cash_adj_dt = self.sim_params.first_open \
+ datetime.timedelta(hours=3)
cash_adjustment = factory.create_commission(1, 300.0,
cash_adj_dt)
# Insert a purchase order.
events.insert(0, create_txn(events[0], 20, 1))
events.insert(1, cash_adjustment)
results = calculate_results(self, events)
# Validate that we lost 320 dollars from our cash pool.
self.assertEqual(results[-1]['cumulative_perf']['ending_cash'],
9680)
# Validate that the cost basis of our position changed.
self.assertEqual(results[-1]['daily_perf']['positions']
[0]['cost_basis'], 320.0)
def test_commission_zero_position(self):
"""
Ensure no div-by-zero errors.
"""
with trading.TradingEnvironment():
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params
)
cash_adj_dt = self.sim_params.first_open \
+ datetime.timedelta(hours=3)
cash_adjustment = factory.create_commission(1, 300.0,
cash_adj_dt)
# Insert a purchase order.
events.insert(0, create_txn(events[0], 20, 1))
# Sell that order.
events.insert(1, create_txn(events[1], 20, -1))
events.insert(2, cash_adjustment)
results = calculate_results(self, events)
# Validate that we lost 300 dollars from our cash pool.
self.assertEqual(results[-1]['cumulative_perf']['ending_cash'],
9700)
def test_commission_no_position(self):
"""
Ensure no position-not-found or sid-not-found errors.
"""
with trading.TradingEnvironment():
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params
)
cash_adj_dt = self.sim_params.first_open \
+ datetime.timedelta(hours=3)
cash_adjustment = factory.create_commission(1, 300.0,
cash_adj_dt)
events.insert(0, cash_adjustment)
results = calculate_results(self, events)
# Validate that we lost 300 dollars from our cash pool.
self.assertEqual(results[-1]['cumulative_perf']['ending_cash'],
9700)
class TestDividendPerformance(unittest.TestCase):
def setUp(self):
self.sim_params, self.dt, self.end_dt = \
create_random_simulation_parameters()
self.sim_params.capital_base = 10e3
self.benchmark_events = benchmark_events_in_range(self.sim_params)
def test_market_hours_calculations(self):
with trading.TradingEnvironment():
# DST in US/Eastern began on Sunday March 14, 2010
before = datetime.datetime(2010, 3, 12, 14, 31, tzinfo=pytz.utc)
after = factory.get_next_trading_dt(
before,
datetime.timedelta(days=1)
)
self.assertEqual(after.hour, 13)
def test_long_position_receives_dividend(self):
with trading.TradingEnvironment():
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params
)
dividend = factory.create_dividend(
1,
10.00,
# declared date, when the algorithm finds out about
# the dividend
events[1].dt,
# ex_date, when the algorithm is credited with the
# dividend
events[1].dt,
# pay date, when the algorithm receives the dividend.
events[2].dt
)
txn = create_txn(events[0], 10.0, 100)
events.insert(0, txn)
events.insert(1, dividend)
results = calculate_results(self, events)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0.0, 0.0, 0.1, 0.1, 0.1])
daily_returns = [event['daily_perf']['returns']
for event in results]
self.assertEqual(daily_returns, [0.0, 0.0, 0.10, 0.0, 0.0])
cash_flows = [event['daily_perf']['capital_used']
for event in results]
self.assertEqual(cash_flows, [-1000, 0, 1000, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [-1000, -1000, 0, 0, 0])
cash_pos = \
[event['cumulative_perf']['ending_cash'] for event in results]
self.assertEqual(cash_pos, [9000, 9000, 10000, 10000, 10000])
def test_long_position_receives_stock_dividend(self):
with trading.TradingEnvironment():
# post some trades in the market
events = []
for sid in (1, 2):
events.extend(
factory.create_trade_history(
sid,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params)
)
dividend = factory.create_stock_dividend(
1,
payment_sid=2,
ratio=2,
# declared date, when the algorithm finds out about
# the dividend
declared_date=events[1].dt,
# ex_date, when the algorithm is credited with the
# dividend
ex_date=events[1].dt,
# pay date, when the algorithm receives the dividend.
pay_date=events[2].dt
)
txn = create_txn(events[0], 10.0, 100)
events.insert(0, txn)
events.insert(1, dividend)
results = calculate_results(self, events)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0.0, 0.0, 0.2, 0.2, 0.2])
daily_returns = [event['daily_perf']['returns']
for event in results]
self.assertEqual(daily_returns, [0.0, 0.0, 0.2, 0.0, 0.0])
cash_flows = [event['daily_perf']['capital_used']
for event in results]
self.assertEqual(cash_flows, [-1000, 0, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [-1000] * 5)
cash_pos = \
[event['cumulative_perf']['ending_cash'] for event in results]
self.assertEqual(cash_pos, [9000] * 5)
def test_post_ex_long_position_receives_no_dividend(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params
)
dividend = factory.create_dividend(
1,
10.00,
events[0].dt,
events[1].dt,
events[2].dt
)
events.insert(1, dividend)
txn = create_txn(events[3], 10.0, 100)
events.insert(4, txn)
results = calculate_results(self, events)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0, 0, 0, 0, 0])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0, 0, 0, 0, 0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [0, 0, -1000, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [0, 0, -1000, -1000, -1000])
def test_selling_before_dividend_payment_still_gets_paid(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params
)
dividend = factory.create_dividend(
1,
10.00,
events[0].dt,
events[1].dt,
events[3].dt
)
buy_txn = create_txn(events[0], 10.0, 100)
events.insert(1, buy_txn)
sell_txn = create_txn(events[3], 10.0, -100)
events.insert(4, sell_txn)
events.insert(0, dividend)
results = calculate_results(self, events)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0, 0, 0, 0.1, 0.1])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0, 0, 0, 0.1, 0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [-1000, 0, 1000, 1000, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [-1000, -1000, 0, 1000, 1000])
def test_buy_and_sell_before_ex(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10, 10],
[100, 100, 100, 100, 100, 100],
oneday,
self.sim_params
)
dividend = factory.create_dividend(
1,
10.00,
events[3].dt,
events[4].dt,
events[5].dt
)
buy_txn = create_txn(events[1], 10.0, 100)
events.insert(1, buy_txn)
sell_txn = create_txn(events[3], 10.0, -100)
events.insert(3, sell_txn)
events.insert(1, dividend)
results = calculate_results(self, events)
self.assertEqual(len(results), 6)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0, 0, 0, 0, 0, 0])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0, 0, 0, 0, 0, 0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [0, -1000, 1000, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [0, -1000, 0, 0, 0, 0])
def test_ending_before_pay_date(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params
)
pay_date = self.sim_params.first_open
# find pay date that is much later.
for i in range(30):
pay_date = factory.get_next_trading_dt(pay_date, oneday)
dividend = factory.create_dividend(
1,
10.00,
events[0].dt,
events[1].dt,
pay_date
)
buy_txn = create_txn(events[1], 10.0, 100)
events.insert(2, buy_txn)
events.insert(1, dividend)
results = calculate_results(self, events)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0, 0, 0, 0.0, 0.0])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0, 0, 0, 0, 0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [0, -1000, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(
cumulative_cash_flows,
[0, -1000, -1000, -1000, -1000]
)
def test_short_position_pays_dividend(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params
)
dividend = factory.create_dividend(
1,
10.00,
# declare at open of test
events[0].dt,
# ex_date same as trade 2
events[2].dt,
events[3].dt
)
txn = create_txn(events[1], 10.0, -100)
events.insert(1, txn)
events.insert(0, dividend)
results = calculate_results(self, events)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0.0, 0.0, 0.0, -0.1, -0.1])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0.0, 0.0, 0.0, -0.1, 0.0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [0, 1000, 0, -1000, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [0, 1000, 1000, 0, 0])
def test_no_position_receives_no_dividend(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params
)
dividend = factory.create_dividend(
1,
10.00,
events[0].dt,
events[1].dt,
events[2].dt
)
events.insert(1, dividend)
results = calculate_results(self, events)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0.0, 0.0, 0.0, 0.0, 0.0])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0.0, 0.0, 0.0, 0.0, 0.0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [0, 0, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [0, 0, 0, 0, 0])
class TestDividendPerformanceHolidayStyle(TestDividendPerformance):
# The holiday tests begins the simulation on the day
# before Thanksgiving, so that the next trading day is
# two days ahead. Any tests that hard code events
# to be start + oneday will fail, since those events will
# be skipped by the simulation.
def setUp(self):
self.dt = datetime.datetime(2003, 11, 30, tzinfo=pytz.utc)
self.end_dt = datetime.datetime(2004, 11, 25, tzinfo=pytz.utc)
self.sim_params = SimulationParameters(
self.dt,
self.end_dt)
self.benchmark_events = benchmark_events_in_range(self.sim_params)
class TestPositionPerformance(unittest.TestCase):
def setUp(self):
self.sim_params, self.dt, self.end_dt = \
create_random_simulation_parameters()
self.benchmark_events = benchmark_events_in_range(self.sim_params)
def test_long_position(self):
"""
verify that the performance period calculates properly for a
single buy transaction
"""
# post some trades in the market
trades = factory.create_trade_history(
1,
[10, 10, 10, 11],
[100, 100, 100, 100],
onesec,
self.sim_params
)
txn = create_txn(trades[1], 10.0, 100)
pp = perf.PerformancePeriod(1000.0)
pp.execute_transaction(txn)
for trade in trades:
pp.update_last_sale(trade)
pp.calculate_performance()
self.assertEqual(
pp.period_cash_flow,
-1 * txn.price * txn.amount,
"capital used should be equal to the opposite of the transaction \
cost of sole txn in test"
)
self.assertEqual(
len(pp.positions),
1,
"should be just one position")
self.assertEqual(
pp.positions[1].sid,
txn.sid,
"position should be in security with id 1")
self.assertEqual(
pp.positions[1].amount,
txn.amount,
"should have a position of {sharecount} shares".format(
sharecount=txn.amount
)
)
self.assertEqual(
pp.positions[1].cost_basis,
txn.price,
"should have a cost basis of 10"
)
self.assertEqual(
pp.positions[1].last_sale_price,
trades[-1]['price'],
"last sale should be same as last trade. \
expected {exp} actual {act}".format(
exp=trades[-1]['price'],
act=pp.positions[1].last_sale_price)
)
self.assertEqual(
pp.ending_value,
1100,
"ending value should be price of last trade times number of \
shares in position"
)
self.assertEqual(pp.pnl, 100, "gain of 1 on 100 shares should be 100")
def test_short_position(self):
"""verify that the performance period calculates properly for a \
single short-sale transaction"""
trades = factory.create_trade_history(
1,
[10, 10, 10, 11, 10, 9],
[100, 100, 100, 100, 100, 100],
onesec,
self.sim_params
)
trades_1 = trades[:-2]
txn = create_txn(trades[1], 10.0, -100)
pp = perf.PerformancePeriod(1000.0)
pp.execute_transaction(txn)
for trade in trades_1:
pp.update_last_sale(trade)
pp.calculate_performance()
self.assertEqual(
pp.period_cash_flow,
-1 * txn.price * txn.amount,
"capital used should be equal to the opposite of the transaction\
cost of sole txn in test"
)
self.assertEqual(
len(pp.positions),
1,
"should be just one position")
self.assertEqual(
pp.positions[1].sid,
txn.sid,
"position should be in security from the transaction"
)
self.assertEqual(
pp.positions[1].amount,
-100,
"should have a position of -100 shares"
)
self.assertEqual(
pp.positions[1].cost_basis,
txn.price,
"should have a cost basis of 10"
)
self.assertEqual(
pp.positions[1].last_sale_price,
trades_1[-1]['price'],
"last sale should be price of last trade"
)
self.assertEqual(
pp.ending_value,
-1100,
"ending value should be price of last trade times number of \
shares in position"
)
        self.assertEqual(pp.pnl, -100, "rise of 1 against a short of 100 shares should be -100")
# simulate additional trades, and ensure that the position value
# reflects the new price
trades_2 = trades[-2:]
# simulate a rollover to a new period
pp.rollover()
for trade in trades_2:
pp.update_last_sale(trade)
pp.calculate_performance()
self.assertEqual(
pp.period_cash_flow,
0,
"capital used should be zero, there were no transactions in \
performance period"
)
self.assertEqual(
len(pp.positions),
1,
"should be just one position"
)
self.assertEqual(
pp.positions[1].sid,
txn.sid,
"position should be in security from the transaction"
)
self.assertEqual(
pp.positions[1].amount,
-100,
"should have a position of -100 shares"
)
self.assertEqual(
pp.positions[1].cost_basis,
txn.price,
"should have a cost basis of 10"
)
self.assertEqual(
pp.positions[1].last_sale_price,
trades_2[-1].price,
"last sale should be price of last trade"
)
self.assertEqual(
pp.ending_value,
-900,
"ending value should be price of last trade times number of \
shares in position")
self.assertEqual(
pp.pnl,
200,
"drop of 2 on -100 shares should be 200"
)
# now run a performance period encompassing the entire trade sample.
ppTotal = perf.PerformancePeriod(1000.0)
for trade in trades_1:
ppTotal.update_last_sale(trade)
ppTotal.execute_transaction(txn)
for trade in trades_2:
ppTotal.update_last_sale(trade)
ppTotal.calculate_performance()
self.assertEqual(
ppTotal.period_cash_flow,
-1 * txn.price * txn.amount,
"capital used should be equal to the opposite of the transaction \
cost of sole txn in test"
)
self.assertEqual(
len(ppTotal.positions),
1,
"should be just one position"
)
self.assertEqual(
ppTotal.positions[1].sid,
txn.sid,
"position should be in security from the transaction"
)
self.assertEqual(
ppTotal.positions[1].amount,
-100,
"should have a position of -100 shares"
)
self.assertEqual(
ppTotal.positions[1].cost_basis,
txn.price,
"should have a cost basis of 10"
)
self.assertEqual(
ppTotal.positions[1].last_sale_price,
trades_2[-1].price,
"last sale should be price of last trade"
)
self.assertEqual(
ppTotal.ending_value,
-900,
"ending value should be price of last trade times number of \
shares in position")
self.assertEqual(
ppTotal.pnl,
100,
"drop of 1 on -100 shares should be 100"
)
def test_covering_short(self):
"""verify performance where short is bought and covered, and shares \
trade after cover"""
trades = factory.create_trade_history(
1,
[10, 10, 10, 11, 9, 8, 7, 8, 9, 10],
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100],
onesec,
self.sim_params
)
short_txn = create_txn(
trades[1],
10.0,
-100,
)
cover_txn = create_txn(trades[6], 7.0, 100)
pp = perf.PerformancePeriod(1000.0)
pp.execute_transaction(short_txn)
pp.execute_transaction(cover_txn)
for trade in trades:
pp.update_last_sale(trade)
pp.calculate_performance()
short_txn_cost = short_txn.price * short_txn.amount
cover_txn_cost = cover_txn.price * cover_txn.amount
self.assertEqual(
pp.period_cash_flow,
-1 * short_txn_cost - cover_txn_cost,
"capital used should be equal to the net transaction costs"
)
self.assertEqual(
len(pp.positions),
1,
"should be just one position"
)
self.assertEqual(
pp.positions[1].sid,
short_txn.sid,
"position should be in security from the transaction"
)
self.assertEqual(
pp.positions[1].amount,
0,
"should have a position of -100 shares"
)
self.assertEqual(
pp.positions[1].cost_basis,
0,
"a covered position should have a cost basis of 0"
)
self.assertEqual(
pp.positions[1].last_sale_price,
trades[-1].price,
"last sale should be price of last trade"
)
self.assertEqual(
pp.ending_value,
0,
"ending value should be price of last trade times number of \
shares in position"
)
self.assertEqual(
pp.pnl,
300,
"gain of 1 on 100 shares should be 300"
)
def test_cost_basis_calc(self):
history_args = (
1,
[10, 11, 11, 12],
[100, 100, 100, 100],
onesec,
self.sim_params
)
trades = factory.create_trade_history(*history_args)
transactions = factory.create_txn_history(*history_args)
pp = perf.PerformancePeriod(1000.0)
average_cost = 0
for i, txn in enumerate(transactions):
pp.execute_transaction(txn)
average_cost = (average_cost * i + txn.price) / (i + 1)
self.assertEqual(pp.positions[1].cost_basis, average_cost)
for trade in trades:
pp.update_last_sale(trade)
pp.calculate_performance()
self.assertEqual(
pp.positions[1].last_sale_price,
trades[-1].price,
"should have a last sale of 12, got {val}".format(
val=pp.positions[1].last_sale_price)
)
self.assertEqual(
pp.positions[1].cost_basis,
11,
"should have a cost basis of 11"
)
self.assertEqual(
pp.pnl,
400
)
down_tick = factory.create_trade(
1,
10.0,
100,
trades[-1].dt + onesec)
sale_txn = create_txn(
down_tick,
10.0,
-100)
pp.rollover()
pp.execute_transaction(sale_txn)
pp.update_last_sale(down_tick)
pp.calculate_performance()
self.assertEqual(
pp.positions[1].last_sale_price,
10,
"should have a last sale of 10, was {val}".format(
val=pp.positions[1].last_sale_price)
)
self.assertEqual(
pp.positions[1].cost_basis,
11,
"should have a cost basis of 11"
)
        self.assertEqual(pp.pnl, -800, "cumulative pnl goes from +400 to -400, so this period is -800")
pp3 = perf.PerformancePeriod(1000.0)
average_cost = 0
for i, txn in enumerate(transactions):
pp3.execute_transaction(txn)
average_cost = (average_cost * i + txn.price) / (i + 1)
self.assertEqual(pp3.positions[1].cost_basis, average_cost)
pp3.execute_transaction(sale_txn)
trades.append(down_tick)
for trade in trades:
pp3.update_last_sale(trade)
pp3.calculate_performance()
self.assertEqual(
pp3.positions[1].last_sale_price,
10,
"should have a last sale of 10"
)
self.assertEqual(
pp3.positions[1].cost_basis,
11,
"should have a cost basis of 11"
)
self.assertEqual(
pp3.pnl,
-400,
"should be -400 for all trades and transactions in period"
)
def test_cost_basis_calc_close_pos(self):
history_args = (
1,
[10, 9, 11, 8, 9, 12, 13, 14],
[200, -100, -100, 100, -300, 100, 500, 400],
onesec,
self.sim_params
)
cost_bases = [10, 10, 0, 8, 9, 9, 13, 13.5]
trades = factory.create_trade_history(*history_args)
transactions = factory.create_txn_history(*history_args)
pp = perf.PerformancePeriod(1000.0)
for txn, cb in zip(transactions, cost_bases):
pp.execute_transaction(txn)
self.assertEqual(pp.positions[1].cost_basis, cb)
for trade in trades:
pp.update_last_sale(trade)
pp.calculate_performance()
self.assertEqual(pp.positions[1].cost_basis, cost_bases[-1])
class TestPerformanceTracker(unittest.TestCase):
NumDaysToDelete = collections.namedtuple(
'NumDaysToDelete', ('start', 'middle', 'end'))
@parameterized.expand([
("Don't delete any events",
NumDaysToDelete(start=0, middle=0, end=0)),
("Delete first day of events",
NumDaysToDelete(start=1, middle=0, end=0)),
("Delete first two days of events",
NumDaysToDelete(start=2, middle=0, end=0)),
("Delete one day of events from the middle",
NumDaysToDelete(start=0, middle=1, end=0)),
("Delete two events from the middle",
NumDaysToDelete(start=0, middle=2, end=0)),
("Delete last day of events",
NumDaysToDelete(start=0, middle=0, end=1)),
("Delete last two days of events",
NumDaysToDelete(start=0, middle=0, end=2)),
("Delete all but one event.",
NumDaysToDelete(start=2, middle=1, end=2)),
])
def test_tracker(self, parameter_comment, days_to_delete):
"""
@days_to_delete - configures which days in the data set we should
remove, used for ensuring that we still return performance messages
even when there is no data.
"""
# This date range covers Columbus day,
# however Columbus day is not a market holiday
#
# October 2008
# Su Mo Tu We Th Fr Sa
# 1 2 3 4
# 5 6 7 8 9 10 11
# 12 13 14 15 16 17 18
# 19 20 21 22 23 24 25
# 26 27 28 29 30 31
start_dt = datetime.datetime(year=2008,
month=10,
day=9,
tzinfo=pytz.utc)
end_dt = datetime.datetime(year=2008,
month=10,
day=16,
tzinfo=pytz.utc)
trade_count = 6
sid = 133
price = 10.1
price_list = [price] * trade_count
volume = [100] * trade_count
trade_time_increment = datetime.timedelta(days=1)
sim_params = SimulationParameters(
period_start=start_dt,
period_end=end_dt
)
benchmark_events = benchmark_events_in_range(sim_params)
trade_history = factory.create_trade_history(
sid,
price_list,
volume,
trade_time_increment,
sim_params,
source_id="factory1"
)
sid2 = 134
price2 = 12.12
price2_list = [price2] * trade_count
trade_history2 = factory.create_trade_history(
sid2,
price2_list,
volume,
trade_time_increment,
sim_params,
source_id="factory2"
)
# 'middle' start of 3 depends on number of days == 7
middle = 3
# First delete from middle
if days_to_delete.middle:
del trade_history[middle:(middle + days_to_delete.middle)]
del trade_history2[middle:(middle + days_to_delete.middle)]
# Delete start
if days_to_delete.start:
del trade_history[:days_to_delete.start]
del trade_history2[:days_to_delete.start]
# Delete from end
if days_to_delete.end:
del trade_history[-days_to_delete.end:]
del trade_history2[-days_to_delete.end:]
sim_params.first_open = \
sim_params.calculate_first_open()
sim_params.last_close = \
sim_params.calculate_last_close()
sim_params.capital_base = 1000.0
sim_params.frame_index = [
'sid',
'volume',
'dt',
'price',
'changed']
perf_tracker = perf.PerformanceTracker(
sim_params
)
events = date_sorted_sources(trade_history, trade_history2)
events = [event for event in
self.trades_with_txns(events, trade_history[0].dt)]
# Extract events with transactions to use for verification.
txns = [event for event in
events if event.type == DATASOURCE_TYPE.TRANSACTION]
orders = [event for event in
events if event.type == DATASOURCE_TYPE.ORDER]
all_events = date_sorted_sources(events, benchmark_events)
filtered_events = [filt_event for filt_event
in all_events if filt_event.dt <= end_dt]
filtered_events.sort(key=lambda x: x.dt)
grouped_events = itertools.groupby(filtered_events, lambda x: x.dt)
perf_messages = []
for date, group in grouped_events:
for event in group:
perf_tracker.process_event(event)
msg = perf_tracker.handle_market_close()
perf_messages.append(msg)
self.assertEqual(perf_tracker.txn_count, len(txns))
self.assertEqual(perf_tracker.txn_count, len(orders))
cumulative_pos = perf_tracker.cumulative_performance.positions[sid]
expected_size = len(txns) / 2 * -25
self.assertEqual(cumulative_pos.amount, expected_size)
self.assertEqual(len(perf_messages),
sim_params.days_in_period)
def trades_with_txns(self, events, no_txn_dt):
for event in events:
# create a transaction for all but
# first trade in each sid, to simulate None transaction
if event.dt != no_txn_dt:
order = Order(
sid=event.sid,
amount=-25,
dt=event.dt
)
order.source_id = 'MockOrderSource'
yield order
yield event
txn = Transaction(
sid=event.sid,
amount=-25,
dt=event.dt,
price=10.0,
commission=0.50,
order_id=order.id
)
txn.source_id = 'MockTransactionSource'
yield txn
else:
yield event
def test_minute_tracker(self):
""" Tests minute performance tracking."""
with trading.TradingEnvironment():
start_dt = trading.environment.exchange_dt_in_utc(
datetime.datetime(2013, 3, 1, 9, 31))
end_dt = trading.environment.exchange_dt_in_utc(
datetime.datetime(2013, 3, 1, 16, 0))
sim_params = SimulationParameters(
period_start=start_dt,
period_end=end_dt,
emission_rate='minute'
)
tracker = perf.PerformanceTracker(sim_params)
foo_event_1 = factory.create_trade('foo', 10.0, 20, start_dt)
order_event_1 = Order(sid=foo_event_1.sid,
amount=-25,
dt=foo_event_1.dt)
bar_event_1 = factory.create_trade('bar', 100.0, 200, start_dt)
txn_event_1 = Transaction(sid=foo_event_1.sid,
amount=-25,
dt=foo_event_1.dt,
price=10.0,
commission=0.50,
order_id=order_event_1.id)
benchmark_event_1 = Event({
'dt': start_dt,
'returns': 0.01,
'type': DATASOURCE_TYPE.BENCHMARK
})
foo_event_2 = factory.create_trade(
'foo', 11.0, 20, start_dt + datetime.timedelta(minutes=1))
bar_event_2 = factory.create_trade(
'bar', 11.0, 20, start_dt + datetime.timedelta(minutes=1))
benchmark_event_2 = Event({
'dt': start_dt + datetime.timedelta(minutes=1),
'returns': 0.02,
'type': DATASOURCE_TYPE.BENCHMARK
})
events = [
foo_event_1,
order_event_1,
benchmark_event_1,
txn_event_1,
bar_event_1,
foo_event_2,
benchmark_event_2,
bar_event_2,
]
grouped_events = itertools.groupby(
events, operator.attrgetter('dt'))
messages = {}
for date, group in grouped_events:
tracker.set_date(date)
for event in group:
tracker.process_event(event)
tracker.handle_minute_close(date)
msg = tracker.to_dict()
messages[date] = msg
self.assertEquals(2, len(messages))
msg_1 = messages[foo_event_1.dt]
msg_2 = messages[foo_event_2.dt]
self.assertEquals(1, len(msg_1['minute_perf']['transactions']),
"The first message should contain one "
"transaction.")
# Check that transactions aren't emitted for previous events.
self.assertEquals(0, len(msg_2['minute_perf']['transactions']),
"The second message should have no "
"transactions.")
self.assertEquals(1, len(msg_1['minute_perf']['orders']),
"The first message should contain one orders.")
# Check that orders aren't emitted for previous events.
self.assertEquals(0, len(msg_2['minute_perf']['orders']),
"The second message should have no orders.")
# Ensure that period_close moves through time.
# Also, ensure that the period_closes are the expected dts.
self.assertEquals(foo_event_1.dt,
msg_1['minute_perf']['period_close'])
self.assertEquals(foo_event_2.dt,
msg_2['minute_perf']['period_close'])
# Ensure that a Sharpe value for cumulative metrics is being
# created.
self.assertIsNotNone(msg_1['cumulative_risk_metrics']['sharpe'])
self.assertIsNotNone(msg_2['cumulative_risk_metrics']['sharpe'])
|
wavelets/zipline
|
tests/test_perf_tracking.py
|
Python
|
apache-2.0
| 47,497
|
[
"COLUMBUS"
] |
dcd4a101ee05caa7ed50869e6deb007425705a5d3b142871ee01c9f2057cdc02
|
# Perovskites octahedral tilting extraction
# based on Surf.Sci.602 3674 (2008)
# http://dx.doi.org/10.1016/j.susc.2008.10.002
# Author: Evgeny Blokhin
#
# KNOWN BUG: in some low-symmetry cases ("batio3_lda_hw12d_160_to.out"),
# octahedra are not aligned with the axes, and the origin of their distortion is unknown.
# Even if the rotation is absent (i.e. a pseudo-cubic structure),
# an "artificial" rotation can still be extracted
from __future__ import division
import math
from functools import reduce
from numpy.linalg import norm
from ase import Atom
from tilde.core.common import ModuleError #, generate_xyz
from tilde.core.constants import Perovskite_Structure
from tilde.core.symmetry import SymmetryFinder
class Perovskite_tilting():
OCTAHEDRON_BOND_LENGTH_LIMIT = 2.5 # Angstrom
OCTAHEDRON_ATOMS_Z_DIFFERENCE = 1.6 # Angstrom
    MAX_TILTING_DEGREE = 22.4 # degrees; used for adjustment and may produce unphysical results
def __init__(self, tilde_calc):
self.prec_angles = {} # non-rounded, non-unique, all-planes angles
self.angles = {} # rounded, unique, one-plane angles
symm = SymmetryFinder()
symm.refine_cell(tilde_calc)
if symm.error:
raise ModuleError("Cell refinement error: %s" % symm.error)
# check if the longest axis is Z, rotate otherwise
lengths = list(map(norm, symm.refinedcell.cell)) # Py3
if not (lengths[2] - lengths[0] > 1E-6 and lengths[2] - lengths[1] > 1E-6):
axnames = {0: 'x', 1: 'y'}
principal_ax = axnames[ lengths.index(max(lengths[0], lengths[1])) ]
symm.refinedcell.rotate(principal_ax, 'z', rotate_cell = True)
self.virtual_atoms = symm.refinedcell.copy()
#with open('tilting.xyz', 'w') as f:
# f.write(generate_xyz(self.virtual_atoms))
# translate atoms around octahedra in all directions
shift_dirs = [(1, 0, 0), (-1, 0, 0), (0, 1, 0), (0, -1, 0), (1, 1, 0), (1, -1, 0), (-1, -1, 0), (-1, 1, 0), (0, 0, 1), (0, 0, -1)]
for k, i in enumerate(symm.refinedcell):
if i.symbol in Perovskite_Structure.C:
for sdir in shift_dirs:
self.translate(k, symm.refinedcell.cell, sdir, self.virtual_atoms)
# extract octahedra and their main tilting planes
for octahedron in self.get_octahedra(symm.refinedcell, symm.refinedcell.periodicity):
#print 'octahedron:', octahedron[0]+1 #, self.virtual_atoms[octahedron[0]].symbol, self.virtual_atoms[octahedron[0]].x, self.virtual_atoms[octahedron[0]].y, self.virtual_atoms[octahedron[0]].z
#print 'corners:', [i+1 for i in octahedron[1]]
# Option 1. Extract only one tilting plane, the closest to perpendicular to Z-axis
'''tiltplane = self.get_tiltplane(octahedron[1])
if len(tiltplane) == 4:
t = self.get_tilting(tiltplane)
#print 'result:', [i+1 for i in tiltplane], t
self.prec_angles.update( { octahedron[0]: [ t ] } )'''
# Option 2. Extract all three possible tilting planes,
# try to spot the closest to perpendicular to Z-axis
# and consider the smallest tilting
plane_tilting = []
for oplane in self.get_tiltplanes(octahedron[1]):
t = self.get_tilting(oplane)
#print "result:", [i+1 for i in oplane], t
plane_tilting.append( t )
self.prec_angles.update( { octahedron[0]: plane_tilting } )
if not self.prec_angles: raise ModuleError("Cannot find any main tilting plane!")
# uniquify and round self.prec_angles to obtain self.angles
u, todel = [], []
for o in self.prec_angles:
self.prec_angles[o] = reduce(lambda x, y: x if sum(x) <= sum(y) else y, self.prec_angles[o]) # only minimal angles are taken if tilting planes vary!
self.prec_angles[o] = list(map(lambda x: list(map(lambda y: round(y, 2), x)), [self.prec_angles[o]])) # Py3
for i in self.prec_angles[o]:
u.append([o] + i)
u = sorted(u, key=lambda x:x[0])
u.reverse() # to make index of oct.centers minimal
for i in u:
for j in range(u.index(i)+1, len(u)):
if i[1:] == u[j][1:]:
todel.append(u.index(i))
continue
for i in [j for j in u if u.index(j) not in todel]:
self.angles[ i[0]+1 ] = i[1:] # atomic index is counted from zero!
def translate(self, num_of_atom, cell, components, reference):
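        # append to `reference' a periodic image of atom `num_of_atom', shifted by
        # the lattice vector a*cell[0] + b*cell[1] + c*cell[2] given in `components'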
a_component, b_component, c_component = components
reference.append(Atom(
reference[num_of_atom].symbol,
(reference[num_of_atom].x + a_component * cell[0][0] + b_component * cell[1][0] + c_component * cell[2][0],
reference[num_of_atom].y + a_component * cell[0][1] + b_component * cell[1][1] + c_component * cell[2][1],
reference[num_of_atom].z + a_component * cell[0][2] + b_component * cell[1][2] + c_component * cell[2][2])
))
def get_bisector_point(self, num_of_A, num_of_O, num_of_B, reference):
xA = reference[num_of_A].x
yA = reference[num_of_A].y
zA = reference[num_of_A].z
xO = reference[num_of_O].x
yO = reference[num_of_O].y
zO = reference[num_of_O].z
xB = reference[num_of_B].x
yB = reference[num_of_B].y
zB = reference[num_of_B].z
m = self.virtual_atoms.get_distance(num_of_O, num_of_A)
n = self.virtual_atoms.get_distance(num_of_O, num_of_B)
# bisector length
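        # (standard identity: the internal bisector from O in triangle A-O-B with
        #  |OA| = m, |OB| = n and angle AOB = theta has length 2*m*n*cos(theta/2)/(m+n))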
l = 2 * m * n * math.cos(math.radians(self.virtual_atoms.get_angle(num_of_A, num_of_O, num_of_B) / 2)) / (m + n)
v = math.sqrt(n**2 - n * l**2 / m)
u = m * v / n
A = yA*(zO - zB) + yO*(zB - zA) + yB*(zA - zO)
B = zA*(xO - xB) + zO*(xB - xA) + zB*(xA - xO)
C = xA*(yO - yB) + xO*(yB - yA) + xB*(yA - yO)
if C == 0: C = 1E-10 # prevent zero division
D = xA*(yO*zB - yB*zO) + xO*(yB*zA - yA*zB) + xB*(yA*zO - yO*zA)
D *= -1
        # from the analytical equation of the plane through A, O and B
x = (xA + u*xB/v)/(1+u/v)
y = (yA + u*yB/v)/(1+u/v)
z = -((A*x + B*y + D) / C)
return [x, y, z]
def get_octahedra(self, atoms, periodicity=3):
'''
Extract octahedra as lists of sequence numbers of corner atoms
'''
octahedra = []
for n, i in enumerate(atoms):
found = []
if i.symbol in Perovskite_Structure.B:
for m, j in enumerate(self.virtual_atoms):
if j.symbol in Perovskite_Structure.C and self.virtual_atoms.get_distance(n, m) <= self.OCTAHEDRON_BOND_LENGTH_LIMIT:
found.append(m)
if (periodicity == 3 and len(found) == 6) or (periodicity == 2 and len(found) in [5, 6]):
octahedra.append([n, found])
if not len(octahedra): raise ModuleError("Cannot extract valid octahedra: not enough corner atoms found!")
return octahedra
def get_tiltplane(self, sequence):
'''
        Extract the main tilting plane based on the Z coordinate
'''
sequence = sorted(sequence, key=lambda x: self.virtual_atoms[ x ].z)
in_plane = []
for i in range(0, len(sequence)-4):
if abs(self.virtual_atoms[ sequence[i] ].z - self.virtual_atoms[ sequence[i+1] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE and \
abs(self.virtual_atoms[ sequence[i+1] ].z - self.virtual_atoms[ sequence[i+2] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE and \
abs(self.virtual_atoms[ sequence[i+2] ].z - self.virtual_atoms[ sequence[i+3] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE:
in_plane = [sequence[j] for j in range(i, i+4)]
return in_plane
def get_tiltplanes(self, sequence):
'''
        Extract tilting planes based on the distance map
'''
tilting_planes = []
distance_map = []
for i in range(1, len(sequence)):
distance_map.append([ sequence[i], self.virtual_atoms.get_distance( sequence[0], sequence[i] ) ])
distance_map = sorted(distance_map, key=lambda x: x[1])
if len(distance_map) == 4:
# surface edge case
# semi-octahedron at surface edge has only one tilting plane to consider
sorted_dist = [i[0] for i in distance_map]
if distance_map[-1][1] - distance_map[-2][1] < 0.5:
# 1st case: max diff < 0.5 Angstrom,
# meaning all distances to reference atom are similar,
# therefore the reference atom is above the searched plane
# and the searched plane consists of other atoms
tilting_planes.append( [ i[0] for i in distance_map ] )
else:
# 2nd case: reference atom belongs to the searched plane,
# procedure needs to be repeated with the next atom as reference atom
candidates = [sequence[0], sorted_dist[-1]]
next_distance_map = []
next_distance_map.append([ sorted_dist[1], self.virtual_atoms.get_distance( sorted_dist[0], sorted_dist[1] ) ])
next_distance_map.append([ sorted_dist[2], self.virtual_atoms.get_distance( sorted_dist[0], sorted_dist[2] ) ])
next_distance_map = sorted(next_distance_map, key=lambda x: x[1])
next_sorted_dist = [i[0] for i in next_distance_map]
# the next reference atom is taken above the plane (distances are similar)
if next_distance_map[1][1] - next_distance_map[0][1] < 0.5: candidates.extend([ next_sorted_dist[0], next_sorted_dist[1] ])
# the next reference atom is taken in the plane (distances are different)
else: candidates.extend([ sorted_dist[0], next_sorted_dist[1] ])
tilting_planes.append(candidates)
elif len(distance_map) == 5:
# full octahedron case
# full octahedron has 3 different tilting planes (perpendicular in ideal case)
sorted_dist = [i[0] for i in distance_map]
# 1st plane is found as:
first_plane = sorted_dist[0:4]
tilting_planes.append(first_plane)
distance_map_first_plane = []
for i in range(1, 4):
distance_map_first_plane.append([ first_plane[i], self.virtual_atoms.get_distance( first_plane[0], first_plane[i] ) ])
distance_map_first_plane = sorted(distance_map_first_plane, key=lambda x: x[1])
sorted_first_plane = [i[0] for i in distance_map_first_plane]
# 2nd and 3rd planes are found as:
tilting_planes.append([ sequence[0], sorted_dist[4], first_plane[0], sorted_first_plane[2] ])
tilting_planes.append([ sequence[0], sorted_dist[4], sorted_first_plane[0], sorted_first_plane[1] ])
        # filter the planes by Z: keep only those whose corner atoms lie at similar heights, as expected for an octahedron
filtered = list(filter(lambda x:
abs(self.virtual_atoms[ x[0] ].z - self.virtual_atoms[ x[1] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE and \
abs(self.virtual_atoms[ x[1] ].z - self.virtual_atoms[ x[2] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE and \
abs(self.virtual_atoms[ x[2] ].z - self.virtual_atoms[ x[3] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE,
tilting_planes
)) # Py3
if len(filtered): tilting_planes = filtered
return tilting_planes
def get_tilting(self, oplane):
'''
Main procedure
'''
surf_atom1, surf_atom2, surf_atom3, surf_atom4 = oplane
# divide surface atoms into groups by distance between them
compare = [surf_atom2, surf_atom3, surf_atom4]
distance_map = []
for i in range(0, 3):
distance_map.append([ compare[i], self.virtual_atoms.get_distance(surf_atom1, compare[i]) ])
distance_map = sorted(distance_map, key=lambda x: x[1])
distance_map_keys = [i[0] for i in distance_map]
surf_atom3 = distance_map_keys[2]
surf_atom2 = distance_map_keys[1]
surf_atom4 = distance_map_keys[0]
if self.virtual_atoms[surf_atom1].z == self.virtual_atoms[surf_atom2].z and \
self.virtual_atoms[surf_atom2].z == self.virtual_atoms[surf_atom3].z and \
self.virtual_atoms[surf_atom3].z == self.virtual_atoms[surf_atom4].z:
# this is done to prevent false zero tilting
self.virtual_atoms[surf_atom1].z += 1E-10
self.virtual_atoms[surf_atom2].z += 1E-10
self.virtual_atoms[surf_atom3].z -= 1E-10
self.virtual_atoms[surf_atom4].z -= 1E-10
# new axes will be defined simply as vectors standing on 1 - 3 and 2 - 4 (they are moved to the point of origin)
self.virtual_atoms.append(Atom('X', (self.virtual_atoms[surf_atom1].x - self.virtual_atoms[surf_atom3].x, self.virtual_atoms[surf_atom1].y - self.virtual_atoms[surf_atom3].y, self.virtual_atoms[surf_atom1].z - self.virtual_atoms[surf_atom3].z)))
self.virtual_atoms.append(Atom('X', (self.virtual_atoms[surf_atom2].x - self.virtual_atoms[surf_atom4].x, self.virtual_atoms[surf_atom2].y - self.virtual_atoms[surf_atom4].y, self.virtual_atoms[surf_atom2].z - self.virtual_atoms[surf_atom4].z)))
self.virtual_atoms.append(Atom('X', (0, 0, 0)))
# redefine tilted axes
surf_atom_first = len(self.virtual_atoms)-3
surf_atom_second = len(self.virtual_atoms)-2
center = len(self.virtual_atoms)-1
# inverse arbitrary atom
self.virtual_atoms.append(Atom('X', (-self.virtual_atoms[surf_atom_first].x, -self.virtual_atoms[surf_atom_first].y, -self.virtual_atoms[surf_atom_first].z)))
inversed_one = len(self.virtual_atoms)-1
# find and add bisectors, silly swapping
first_bisector = self.get_bisector_point(surf_atom_first, center, surf_atom_second, self.virtual_atoms)
sec_bisector = self.get_bisector_point(surf_atom_second, center, inversed_one, self.virtual_atoms)
swap = True
if first_bisector[0] < 0 and sec_bisector[0] < 0:
swap = False
if first_bisector[0] < 0:
first_bisector[0] *= -1
first_bisector[1] *= -1
first_bisector[2] *= -1
if sec_bisector[0] < 0:
sec_bisector[0] *= -1
sec_bisector[1] *= -1
sec_bisector[2] *= -1
if swap:
first_bisector, sec_bisector = sec_bisector, first_bisector
swap = False
if first_bisector[0] < sec_bisector[0] and first_bisector[1] < 0:
first_bisector[0] *= -1
first_bisector[1] *= -1
first_bisector[2] *= -1
swap = True
if first_bisector[0] < sec_bisector[0] and first_bisector[1] > 0:
swap = True
if first_bisector[0] > sec_bisector[0] and sec_bisector[1] < 0:
sec_bisector[0] *= -1
sec_bisector[1] *= -1
sec_bisector[2] *= -1
if swap:
first_bisector, sec_bisector = sec_bisector, first_bisector
self.virtual_atoms.append(Atom('X', (first_bisector[0], first_bisector[1], first_bisector[2])))
self.virtual_atoms.append(Atom('X', (sec_bisector[0], sec_bisector[1], sec_bisector[2])))
first_bisector = len(self.virtual_atoms)-2
sec_bisector = len(self.virtual_atoms)-1
# use vector cross product to define normal which will play Z axis role
self.virtual_atoms.append(Atom('X', (
self.virtual_atoms[first_bisector].y*self.virtual_atoms[sec_bisector].z - self.virtual_atoms[first_bisector].z*self.virtual_atoms[sec_bisector].y,
self.virtual_atoms[first_bisector].z*self.virtual_atoms[sec_bisector].x - self.virtual_atoms[first_bisector].x*self.virtual_atoms[sec_bisector].z,
self.virtual_atoms[first_bisector].x*self.virtual_atoms[sec_bisector].y - self.virtual_atoms[first_bisector].y*self.virtual_atoms[sec_bisector].x
)))
tilt_z = len(self.virtual_atoms)-1
# Euler angles ZYZ
alpha = math.degrees(math.atan2(self.virtual_atoms[sec_bisector].z, self.virtual_atoms[first_bisector].z))
beta = math.degrees(math.atan2(math.sqrt(self.virtual_atoms[tilt_z].x**2 + self.virtual_atoms[tilt_z].y**2), self.virtual_atoms[tilt_z].z))
gamma = math.degrees(math.atan2(self.virtual_atoms[tilt_z].y, -self.virtual_atoms[tilt_z].x))
# angles adjusting
adjust_angles = [45, 90, 135, 180, 225, 270, 315, 360]
tilting = [alpha, beta, gamma]
for i in range(0, 3):
tilting[i] = abs(tilting[i])
if tilting[i] in adjust_angles:
tilting[i] = 0.0
continue
if tilting[i] > self.MAX_TILTING_DEGREE:
for checkpoint in adjust_angles:
if checkpoint - self.MAX_TILTING_DEGREE < tilting[i] < checkpoint + self.MAX_TILTING_DEGREE:
tilting[i] = abs(tilting[i] - checkpoint)
break
return tilting
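# Illustrative sketch (not part of the original module): the block above builds
# two orthogonal in-plane vectors, takes their cross product as a local z axis,
# and reads Euler-like ZYZ angles off the components with atan2. A standalone,
# self-contained version of that last step (hypothetical names) could look like:
import math

def euler_zyz_degrees(b1, b2):
    """Return (alpha, beta, gamma) in degrees for in-plane basis vectors b1, b2."""
    # n = b1 x b2 plays the role of the tilted z axis
    n = (b1[1] * b2[2] - b1[2] * b2[1],
         b1[2] * b2[0] - b1[0] * b2[2],
         b1[0] * b2[1] - b1[1] * b2[0])
    alpha = math.degrees(math.atan2(b2[2], b1[2]))
    beta = math.degrees(math.atan2(math.hypot(n[0], n[1]), n[2]))
    gamma = math.degrees(math.atan2(n[1], -n[0]))
    return alpha, beta, gamma

# An untilted octahedron (axes along x and y) gives zero tilt angles:
print(euler_zyz_degrees((1, 0, 0), (0, 1, 0)))  # (0.0, 0.0, 0.0)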
|
tilde-lab/tilde
|
tilde/apps/perovskite_tilting/perovskite_tilting.py
|
Python
|
mit
| 17,287
|
[
"ASE"
] |
cd49d5594d1c84c8d9976bd8418027aa98f90737d8e5097b64f3a24184c72bfc
|
"""
Author: Ben Dudson, Department of Physics, University of York
benjamin.dudson@york.ac.uk
Edward Blair, Peter Hill, John Wilson
This file is part of PyXPad.
PyXPad is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PyXPad is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PyXPad. If not, see <http://www.gnu.org/licenses/>.
"""
from Qt.QtWidgets import (QAbstractItemView, QAction,
QFileDialog, QMainWindow, QMenu, QMessageBox,
QStyle, QTableWidgetItem, QTreeWidgetItem, QWidget)
from Qt.QtGui import (QCursor, QIcon,)
from Qt.QtCore import Qt, QTextCodec, QDir
from .pyxpad_main import Ui_MainWindow
from .configdialog import ConfigDialog
from collections import OrderedDict
try:
import cPickle as pickle
except ImportError:
import pickle
try:
from StringIO import StringIO # Python 2
except ImportError:
from io import StringIO
import sys
import os
import re
import string
import fnmatch # For matching names to wildcard patterns
from keyword import iskeyword # Test if a string is a keyword
import xdg # Names of XDG directories for config
from pyxpad import fourier # FFT-based methods
from pyxpad import calculus # Integration and differentiation methods
from pyxpad import user_functions # Miscellaneous useful functions
class Sources:
sources = [] # List of sources
def __init__(self, mainwindow):
self.main = mainwindow
self.main.sourceDescription.stateChanged.connect(self.updateDisplay)
self.groupIcon = QIcon()
self.groupIcon.addPixmap(mainwindow.style().standardPixmap(QStyle.SP_DirClosedIcon),
QIcon.Normal, QIcon.Off)
self.groupIcon.addPixmap(mainwindow.style().standardPixmap(QStyle.SP_DirOpenIcon),
QIcon.Normal, QIcon.On)
self.keyIcon = QIcon()
self.keyIcon.addPixmap(mainwindow.style().standardPixmap(QStyle.SP_FileIcon))
self.main.treeView.itemSelectionChanged.connect(self.updateDisplay)
self.main.tracePattern.returnPressed.connect(self.updateDisplay)
self.main.treeView.setContextMenuPolicy(Qt.CustomContextMenu) # Enable popup menus
# Context menu
self.main.treeView.customContextMenuRequested.connect(self.handlePopupMenu)
# Context menu actions
self.actionAdd = QAction("Add", self.main, statusTip="Add a new source")
self.actionDelete = QAction("Delete", self.main, statusTip="Remove source from tree")
self.actionDelete.triggered.connect(self.deleteSource)
self.actionConfig = QAction("Configure", self.main, statusTip="Configure source")
self.actionConfig.triggered.connect(self.configureSource)
def saveState(self, f):
pickle.dump(self.sources, f)
def loadState(self, f):
try:
sources = pickle.load(f)
except EOFError:
sources = []
for s in sources:
self.addSource(s)
self.updateDisplay()
def handlePopupMenu(self):
"""
Called when user right-clicks on the sources tree
"""
menu = QMenu()
menu.addAction(self.actionAdd)
selected = self.main.treeView.selectedItems()
if len(selected) != 0:
if 'config' in selected[0].source.__dict__:
menu.addAction(self.actionConfig)
menu.addAction(self.actionDelete)
menu.exec_(QCursor.pos())
def addNetCDF(self):
"""
Add a NetCDF file as a data source
"""
try:
from pyxpad.datafile import NetCDFDataSource
except ImportError:
self.main.write("Sorry, no NetCDF support")
return
try:
# Get the file name
tr = self.main.tr
fname, _ = QFileDialog.getOpenFileName(self.main, tr('Open file'), '.',
filter=tr("NetCDF files (*.nc *.cdl)"))
if (fname is None) or (fname == ""):
return # Cancelled
s = NetCDFDataSource(fname)
self.addSource(s)
self.updateDisplay()
except:
self.main.write("Error creating NetCDFDataSource")
self.main.write(str(sys.exc_info()))
def addXPADtree(self):
try:
from pyxpad.xpadsource import XPadSource
except ImportError:
self.main.write("Sorry, no XPAD tree support")
self.main.write(str(sys.exc_info()))
return
try:
# Select the directory
tr = self.main.tr
dname = QFileDialog.getExistingDirectory(self.main, tr('Open XPAD directory'),
QDir.currentPath())
if (dname == "") or (dname is None):
return
# Create data source
s = XPadSource(dname)
# Add data source and update
self.addSource(s)
self.updateDisplay()
except:
self.main.write("Error creating XPadSource")
self.main.write(str(sys.exc_info()))
raise
def addBOUT(self):
"""
Add a BOUT++ directory source
"""
try:
from pyxpad.boutsource import BoutDataSource
# Select the directory
tr = self.main.tr
dname = QFileDialog.getExistingDirectory(self.main, tr('Open BOUT++ directory'),
QDir.currentPath())
if (dname == "") or (dname is None):
return
# Create data source
s = BoutDataSource(dname)
# Add data source and update
self.addSource(s)
self.updateDisplay()
except:
self.main.write("Sorry, no BOUT++ support")
raise
return
def addSource(self, source):
self.sources.append(source)
it = QTreeWidgetItem(self.main.treeView, [source.label])
it.setIcon(0, self.groupIcon)
it.source = source
self.main.treeView.addTopLevelItem(it)
def buildtree(parent, it):
# Check for children
try:
for child in parent.children:
itchild = QTreeWidgetItem(it, [child.label])
itchild.source = child
buildtree(child, itchild) # Add child's children
it.addChild(itchild)
except AttributeError:
# Probably no children
return
buildtree(source, it)
def deleteSource(self):
tree = self.main.treeView
selected = tree.selectedItems()
if len(selected) == 0:
return
source = selected[0].source
tree.takeTopLevelItem(tree.indexOfTopLevelItem(selected[0])) # Remove from tree
# Remove from list of sources
i = self.sources.index(source)
del self.sources[i]
# Update the display
self.updateDisplay()
def configureSource(self):
"""
Configure a data source, changing the source's
'config' dictionary.
"""
selected = self.main.treeView.selectedItems()
if len(selected) == 0:
return
source = selected[0].source
c = ConfigDialog(source.config, self.main)
c.exec_()
def updateDisplay(self):
table = self.main.sourceTable
# Find which source is selected, and update table view
selected = self.main.treeView.selectedItems()
if len(selected) == 0:
table.clearContents()
table.setRowCount(0)
return
s = selected[0].source
# Check if any items selected
selecteditems = table.selectedItems()
selectedvars = []
nextra = 0
for item in selecteditems:
if 'source' in item.__dict__:
name = item.text()
selectedvars.append((name, item.source))
if item.source != s:
nextra += 1
table.clearContents() # Clear the table and selections
pattern = self.main.tracePattern.text()
if pattern == "":
varNames = s.varNames
else:
# Filter the variable names
varNames = [name for name in s.varNames
if fnmatch.fnmatch(name.lower(), pattern.lower())]
varNames.sort(key=str.lower)
if self.main.sourceDescription.isChecked():
# Provide description for each variable (if available)
table.setColumnCount(2)
table.setRowCount(len(varNames) + nextra)
table.setSelectionBehavior(QAbstractItemView.SelectRows)
def addVar(name, source, selected=False):
var = source.variables[name]
item = QTableWidgetItem(name)
item.source = source
table.setItem(addVar.ind, 0, item)
item.setSelected(selected)
comment = var.desc
if comment == "":
comment = var.label
if var.units != "":
comment += " ("+var.units+") "
if var.dim:
try:
comment += " [" + ", ".join([str(v) for v in var.dim]) + "]"
except TypeError:
if str(var.dim):
comment += " [" + str(var.dim) + "]"
item = QTableWidgetItem(comment)
table.setItem(addVar.ind, 1, item)
item.setSelected(selected)
addVar.ind += 1
else:
# Just a list of variable names. Can use multiple columns
maxrows = 20
n = len(varNames) + nextra
ncols = int(n / maxrows) + 1
table.setColumnCount(ncols)
table.setRowCount(min([n, maxrows]))
table.setSelectionBehavior(QAbstractItemView.SelectItems)
def addVar(name, source, selected=False):
row = addVar.ind % maxrows
col = int(addVar.ind / maxrows)
item = QTableWidgetItem(name)
item.source = source
table.setItem(row, col, item)
item.setSelected(selected)
addVar.ind += 1
addVar.ind = 0
for name, source in selectedvars:
addVar(name, source, True)
sel = [name for name, source in selectedvars if source == s]
for name in varNames:
if name not in sel:
addVar(name, s)
def read(self):
"""
Read the selected data and return as a list of data items
Input
-----
None
Returns
------
[ XPadDataItem ] or equivalent
Modifies
--------
None
"""
# Get list of shots
shotlist = self.main.shotInput.text().split(',')
table = self.main.sourceTable
tableitems = table.selectedItems()
data = []
for item in tableitems:
if 'source' in item.__dict__:
name = item.text()
for shot in shotlist:
s = "Reading " + name + " from " + item.source.label
if shot != "":
s += " shot = " + shot
self.main.write(s)
# Run in a sandbox to catch exceptions and display output
self.main.runSandboxed(lambda: data.append(item.source.read(name, shot)))
else:
print("Ignoring "+item.text())
return data
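# Minimal sketch (illustrative, not part of PyXPad): the trace pattern box used
# in Sources.updateDisplay filters variable names with case-insensitive fnmatch
# wildcards. 'variable_names' and 'pattern' below are made-up example values.
import fnmatch

variable_names = ["amc_plasma current", "ane_density", "xmo_prl_t1"]
pattern = "a*current"
matches = [name for name in variable_names
           if fnmatch.fnmatch(name.lower(), pattern.lower())]
print(matches)  # ['amc_plasma current']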
class PyXPad(QMainWindow, Ui_MainWindow):
"""
Attributes
data Dictionary of variables containing user data
"""
def __init__(self, parent=None, loadfile=None, ignoreconfig=False):
super().__init__(parent)
self.setupUi(self)
self.sources = Sources(self) # Handles data sources
self.data = OrderedDict() # User data
# File menu
self.actionNetCDF_file.triggered.connect(self.sources.addNetCDF)
self.actionXPAD_tree.triggered.connect(self.sources.addXPADtree)
self.actionBOUT_data.triggered.connect(self.sources.addBOUT)
self.actionExit.triggered.connect(self.close)
self.actionLoadState.triggered.connect(self.loadState)
self.actionSaveState.triggered.connect(self.saveState)
# Graphics menu
self.actionPlot.triggered.connect(self.handlePlot)
self.actionOPlot.triggered.connect(self.handleOPlot)
self.actionMPlot.triggered.connect(self.handleMPlot)
self.actionXYPlot.triggered.connect(self.handleXYPlot)
self.actionZPlot.triggered.connect(self.handleZPlot)
self.actionContour.triggered.connect(self.handleContour)
self.actionContour_filled.triggered.connect(self.handleContourf)
self.actionClearFig.triggered.connect(self.handleClearFig)
# Command menu
self.actionDeleteTrace.triggered.connect(self.handleDeleteTrace)
self.actionChop.triggered.connect(self.handleChop)
self.actionIntegrate.triggered.connect(self.handleIntegrate)
self.actionDf_dt.triggered.connect(self.handleDifferentiate)
self.actionAdd.triggered.connect(self.handleAdd)
self.actionMultiply.triggered.connect(self.handleMultiply)
self.actionSubtract.triggered.connect(self.handleSubtract)
self.actionDivide.triggered.connect(self.handleDivide)
self.actionFFTP.triggered.connect(self.handleFFTP)
self.actionRunFFT.triggered.connect(self.handleRunFFT)
self.actionReciprocal.triggered.connect(self.handleReciprocal)
self.actionExponential.triggered.connect(self.handleExponential)
self.actionAbsolute.triggered.connect(self.handleAbsolute)
self.actionArctan.triggered.connect(self.handleArctan)
self.actionNlog.triggered.connect(self.handleNlog)
self.actionNorm.triggered.connect(self.handleNorm)
self.actionInvert.triggered.connect(self.handleInvert)
self.actionAddCon.triggered.connect(self.handleAddCon)
self.actionSubCon.triggered.connect(self.handleSubCon)
self.actionMulCon.triggered.connect(self.handleMulCon)
self.actionDivCon.triggered.connect(self.handleDivCon)
self.actionPowCon.triggered.connect(self.handlePowCon)
self.actionChangeName.triggered.connect(self.handleChangeName)
self.actionChangeUnits.triggered.connect(self.handleChangeUnit)
self.actionClip.triggered.connect(self.handleClip)
self.actionStats.triggered.connect(self.handleStats)
self.actionTimeOff.triggered.connect(self.handleTimeOff)
# Help menu
self.actionAbout.triggered.connect(self.handleAbout)
# Sources tab
self.readDataButton.clicked.connect(self.readData)
self.shotInput.returnPressed.connect(self.readData)
self.lastShotButton.clicked.connect(self.lastShot)
self.commandInput.commandEntered.connect(self.commandEntered)
self.commandButton.clicked.connect(self.commandEntered)
# Data tab
self.dataTable.cellChanged.connect(self.dataTableChanged)
self.dataTable.customContextMenuRequested.connect(self.handlePopupMenu)
try:
from pyxpad.matplotlib_widget import MatplotlibWidget
self.DataPlot = MatplotlibWidget(self.plotTab)
except:
raise
if not ignoreconfig and loadfile is None:
# Other configuration can be saved in here
self.config_dir = os.path.join(xdg.XDG_CONFIG_HOME, 'pyxpad')
if os.path.exists(xdg.XDG_CACHE_HOME):
os.makedirs(self.config_dir, exist_ok=True)
defaultfile = os.path.join(self.config_dir, "saved_state.pyx")
if os.path.exists(defaultfile):
loadfile = defaultfile
else:
self.config_dir = os.getcwd()
# Load state
if loadfile is not None:
self.config_dir = os.path.dirname(os.path.abspath(loadfile))
self.loadState(loadfile)
def saveState(self, filename=None):
"""
Saves program state to given file. If no file is specified,
then a dialog is created to ask the user for one.
"""
if filename is None:
tr = self.tr
defaultfile = os.path.join(self.config_dir, "saved_state.pyx")
filename, _ = QFileDialog.getSaveFileName(self, dir=defaultfile,
filter=tr("PyXPad save file (*.pyx)"))
if (filename is None) or (filename == ""):
return
try:
with open(filename, 'wb') as f:
self.sources.saveState(f)
pickle.dump(self.data, f)
self.write("** Saved state to file '"+filename+"'")
except:
e = sys.exc_info()
self.write("Could not save state to file '"+filename+"'")
self.write("\t ->" + str(e[1]))
def loadState(self, filename=None):
"""
Loads program state from the given filename.
If no filename is specified, then a dialog is created
to ask the user for a file name.
"""
if filename is None:
tr = self.tr
filename, _ = QFileDialog.getOpenFileName(self, tr('Open file'), '.',
filter=tr("PyXPad save file (*.pyx)"))
if (filename is None) or (filename == ""):
return # Cancelled
if not os.path.exists(filename):
self.write("Could not find " + filename)
return
try:
with open(filename, 'rb') as f:
self.sources.loadState(f)
self.data = pickle.load(f)
except EOFError:
self.data = OrderedDict()
except:
e = sys.exc_info()
self.write("Could not load state from file '"+filename+"'")
self.write("\t ->" + str(e[1]))
raise
else:
# If no exception raised, then update tables, lists
self.data = OrderedDict(self.data)
self.updateDataTable()
self.write("** Loaded state from file '"+filename+"'")
def closeEvent(self, event):
"""
Called when the main window is closed
"""
reply = QMessageBox.question(self, 'Message',
"Are you sure to quit?", QMessageBox.Yes |
QMessageBox.No, QMessageBox.Yes)
if reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
def write(self, text):
"""
Write some log text to output text widget
"""
self.textOutput.append(text)
def makeUnique(self, name):
"""
Modifies a given string into a valid Python variable name
which is not already in self.data
Input
-----
name :: string
self.data (class member)
Returns
------
string containing modified name
Modifies
--------
None
"""
# First make sure the name is a valid variable name
        name = re.sub(r'\W|^(?=\d)', '_', name) # Replace invalid characters with '_'
if iskeyword(name): # Check if name is a keyword
name += '2'
if name in self.data:
# Name is already in the list. Add a number to the end to make it unique
i = 1
while name + "_"+str(i) in self.data:
i += 1
return name + "_"+str(i)
return name
def uniqueName(self):
"""
Generates a unique variable name, which
is not already in self.data
"""
def findName(name, length):
"""
Finds a name
"""
for c in string.ascii_lowercase:
if length <= 1:
if name+c not in self.data:
return name+c
else:
r = findName(name+c, length-1)
if r is not None:
return r
return None
length = 1
while True:
n = findName("", length)
if n is not None:
return n
length += 1
def readData(self):
"""
User pressed the "Read" button to read selected
data items from given shots.
Input
-----
None
Returns
------
None
Modifies
--------
self.data
Calls Sources to read the new data
Inserts data into self.data, ensuring that the name
of each data item is unique and a valid Python name
Updates the data table based on self.data
"""
# Switch to data tab
self.tabWidget.setCurrentWidget(self.dataTab)
# Get the data from the source as a list
newdata = self.sources.read()
if (newdata is None) or (newdata == []):
return # No data read
# Add to the data dictionary
for item in newdata:
try:
# Need to make the name unique
name = self.makeUnique(item.name)
self.data[name] = item
except:
self.write("Error adding item '"+str(item)+"'")
self.updateDataTable()
def lastShot(self):
"""
Get the latest shot number
"""
# Find an XPadSource in the list of sources
from pyxpad.xpadsource import XPadSource
source = None
for _source in self.sources.sources:
if isinstance(_source, XPadSource):
source = _source
if source is None:
# None available
return
# Now ask for the lastshot
data_item = source.read("lastshot", "")
last_shot_number = data_item.data.children[0].lastshot
# Append the shot number to any existing shot numbers
current_text = self.sources.main.shotInput.text()
if current_text == '':
new_text = str(last_shot_number)
else:
new_text = ', '.join([current_text, str(last_shot_number)])
self.sources.main.shotInput.setText(new_text)
return last_shot_number
def updateDataTable(self):
"""
Updates the table of data based on self.data dictionary
"""
n = len(self.data)
table = self.dataTable
table.setSortingEnabled(False) # Stops the table rearranging itself
self.dataTable.cellChanged.disconnect(self.dataTableChanged) # Don't call the dataTableChanged function
table.setRowCount(n)
for row, name in enumerate(self.data):
item = self.data[name]
it = QTableWidgetItem(name)
it.oldname = name # Save this for when it's changed
table.setItem(row, 0, it)
# Assume it's an XPadDataItem
try:
it = QTableWidgetItem(item.source)
it.setFlags(it.flags() ^ Qt.ItemIsEditable) # Make source read only
table.setItem(row, 1, it)
except:
table.setItem(row, 1, QTableWidgetItem(""))
try:
it = QTableWidgetItem(item.name)
it.setFlags(it.flags() ^ Qt.ItemIsEditable) # Make trace read only
table.setItem(row, 2, it)
except:
table.setItem(row, 2, QTableWidgetItem(""))
try:
try:
comment = item.comment
except AttributeError:
comment = item.desc
if comment == "":
comment = item.label
if item.units != "":
comment += " ("+item.units+") "
if item.dim != []:
comment += " [" + item.dim[0].name
for d in item.dim[1:]:
comment += ", " + d.name
comment += "] "
else:
comment += " = " + str(item.data)
table.setItem(row, 3, QTableWidgetItem(comment))
except:
table.setItem(row, 3, QTableWidgetItem(str(item)))
table.setSortingEnabled(True) # Re-enable sorting
self.dataTable.cellChanged.connect(self.dataTableChanged)
def dataTableChanged(self, row, col):
"""
Called when the user changes the value of a cell
in the data table. This can either be to change
the name of a variable, or the comment.
"""
if col == 0:
# The name of the variable
it = self.dataTable.item(row, col)
name = it.text()
oldname = it.oldname
if name == oldname:
return # Not really changed
# Need to make sure new name is unique and valid
name = self.makeUnique(name)
it.setText(name)
it.oldname = name
self.data[name] = self.data[oldname]
del self.data[oldname]
if col == 3:
# Changing the comment
comment = self.dataTable.item(row, col).text()
name = self.dataTable.item(row, 0).text()
self.data[name].comment = comment
print(row, col)
def commandEntered(self, text=None):
"""
Called when a command is entered on the Data tab.
Gets the command string from the text box, and calls
the runCommand to run the command.
"""
# If there's no text, then the "Run" button was probably
# pressed
if text is None or text is False:
text = self.commandInput.text()
self.commandInput.execute(emit=False)
self.commandInput.clear()
self.runCommand(text)
def selectedDataNames(self):
"""
        Returns a list of the names of variables selected
in the Data table.
"""
# Find which items are selected in the data table
items = self.dataTable.selectedItems() # All selected items
if len(items) == 0:
return []
names = [] # List of selected data names
for it in items:
try:
names.append(it.oldname)
except AttributeError:
pass # Ignore if doesn't have a name
return names
def handlePopupMenu(self):
"""
Called when user right-clicks on a trace
"""
menu = QMenu()
selected = self.selectedDataNames()
if len(selected) != 0:
menu.addAction(self.actionDeleteTrace)
menu.exec_(QCursor.pos())
##################### Plot menu actions #####################
def handlePlot(self):
# Find which items are selected
names = self.selectedDataNames()
if len(names) == 0:
return
# Sort by trace name
def trace(name):
try:
return self.data[name].name
except:
return ""
namelist = [(name, trace(name)) for name in names]
values = set(map(lambda x: x[1], namelist))
groups = [[y[0] for y in namelist if y[1] == x] for x in values]
def plotStr(items):
return "[" + ", ".join(items) + "]"
# Create a command to execute
cmd = "plot(" + ", ".join([plotStr(group) for group in groups]) + ")"
# Run the command within sandboxed environment
# Also shows user the command which can be entered
self.runCommand(cmd)
self.tabWidget.setCurrentWidget(self.plotTab)
def handleOPlot(self):
"""
        Creates an overlaid plot of the selected traces
"""
names = self.selectedDataNames()
if len(names) == 0:
return
def trace(name):
try:
return self.data[name].name
except:
return ""
namelist = [(name, trace(name)) for name in names]
values = set(map(lambda x: x[1], namelist))
groups = [[y[0] for y in namelist if y[1] == x] for x in values]
def plotStr(items):
return "[" + ", ".join(items) + "]"
cmd = "oplot(" + ", ".join([plotStr(group) for group in groups]) + ")"
self.runCommand(cmd)
self.tabWidget.setCurrentWidget(self.plotTab)
def handleMPlot(self):
"""
Creates multiple subplots of multiple traces input by the user
"""
names = self.selectedDataNames()
if len(names) == 0:
return
def trace(name):
try:
return self.data[name].name
except:
return ""
namelist = [(name, trace(name)) for name in names]
values = set(map(lambda x: x[1], namelist))
groups = [[y[0] for y in namelist if y[1] == x] for x in values]
def plotStr(items):
return "[" + ", ".join(items) + "]"
cmd = "mplot(" + ", ".join([plotStr(group) for group in groups]) + ")"
self.runCommand(cmd)
self.tabWidget.setCurrentWidget(self.plotTab)
def handleXYPlot(self):
names = self.selectedDataNames()
if len(names) != 2:
self.write("** Two data items must be selected for X-Y plotting")
return
# Run the command in sandbox
self.runCommand("plotxy( "+names[0]+", "+names[1]+")")
self.tabWidget.setCurrentWidget(self.plotTab)
def handleZPlot(self):
"""
        Creates a zoomed plot of selected traces
"""
names = self.selectedDataNames()
if len(names) == 0:
return
def trace(name):
try:
return self.data[name].name
except:
return ""
namelist = [(name, trace(name)) for name in names]
values = set(map(lambda x: x[1], namelist))
groups = [[y[0] for y in namelist if y[1] == x] for x in values]
def plotStr(items):
return "[" + ", ".join(items) + "]"
cmd = "zplot(" + ", ".join([plotStr(group) for group in groups]) + ")"
self.runCommand(cmd)
self.tabWidget.setCurrentWidget(self.plotTab)
def handleClearFig(self):
self.runCommand("clearFig()")
def handleContour(self):
"""
Make a contour plot of a 2D trace
"""
names = self.selectedDataNames()
if len(names) != 1:
self.write("** One data item must be selected for contour")
return
self.runCommand("contour("+names[0]+")")
self.tabWidget.setCurrentWidget(self.plotTab)
def handleContourf(self):
"""
Make a contour plot of a 2D trace
"""
names = self.selectedDataNames()
if len(names) != 1:
self.write("** One data item must be selected for contourf")
return
self.runCommand("contourf("+names[0]+")")
self.tabWidget.setCurrentWidget(self.plotTab)
def handleCommandAction(self, command):
names = self.selectedDataNames()
if len(names) != 1:
self.write("** One data item must be selected for "+command)
return
# Run the command in sandbox
self.runCommand(command+"( "+names[0]+" )")
######## Command menu handlers
def handleDeleteTrace(self):
"""
Delete selected traces
"""
# Get list of selected variables
names = self.selectedDataNames()
if len(names) == 0:
return
for name in names:
self.runCommand("del({})".format(name))
def handleChop(self):
"""
        Chops a signal, keeping only a specified time range
"""
# Get list of selected variables
names = self.selectedDataNames()
if len(names) == 0:
return
try:
# Get current time range from first variable
var = self.data[names[0]]
if var.time is not None:
tmin = var.time[0]
tmax = var.time[-1]
else:
tmin = var.dim[0].data[0]
tmax = var.dim[0].data[-1]
except:
return
# Use a dialog box to get time range
config = OrderedDict({"min": float(tmin), "max": float(tmax)})
c = ConfigDialog(config, self)
c.exec_()
for n in names:
self.runCommand(self.makeUnique(n+"_chop") + " = " +
"chop( "+n+", %e, %e )" % (config["min"], config["max"]))
def handleIntegrate(self):
"""
Integrates one or more traces
"""
names = self.selectedDataNames()
for n in names:
self.runCommand(self.uniqueName() + " = " + "intg( "+n+" )")
def handleDifferentiate(self):
"""
Differentiates one or more traces
"""
names = self.selectedDataNames()
for n in names:
self.runCommand(self.uniqueName() + " = " + "diff(" + n + ")")
def handleAdd(self):
"""
Adds all selected traces together
"""
names = self.selectedDataNames()
if len(names) < 2:
self.write("** At least two data items must be selected to add together")
return
self.runCommand(self.uniqueName() + " = " + "+".join(names))
def handleMultiply(self):
"""
        Multiplies all selected traces together
"""
names = self.selectedDataNames()
if len(names) < 2:
self.write("** At least two data items must be selected to multiply together")
return
self.runCommand(self.uniqueName() + " = " + "*".join(names))
def handleSubtract(self):
"""
Subtracts one trace from another
"""
names = self.selectedDataNames()
if len(names) != 2:
self.write("** Two data items must be selected to subtract one from another")
return
self.runCommand(self.uniqueName()+" = " + names[0] + " - " + names[1])
def handleDivide(self):
"""
Divide one trace by another
"""
names = self.selectedDataNames()
if len(names) != 2:
self.write("** Two data items must be selected to subtract one from another")
return
self.runCommand(self.uniqueName()+" = " + names[0] + " / " + names[1])
def handleFFTP(self):
"""
Perform FFT, returning amplitude and phase
"""
for n in self.selectedDataNames():
# Create a unique name for amplitude and phase
ampname = self.makeUnique(n+"_amp")
phasename = self.makeUnique(n+"_phase")
self.runCommand(ampname+","+phasename + " = " + "fftp( "+n+" )")
def handleRunFFT(self):
"""
Perform Running FFT
"""
names = self.selectedDataNames()
if len(names) == 0:
return
stride = 0.001
width = 0.001
# Use a dialog box to get width and stride
        config = OrderedDict({"stride": float(stride), "width": float(width)})
c = ConfigDialog(config, self)
c.exec_()
for name in names:
# Create a unique name
new_name = self.makeUnique(name + "_runfft")
self.runCommand(new_name + " = " +
"runfft({}, stride={}, width={})".format(name, config["stride"], config["width"]))
def handleReciprocal(self):
"""
Returns the reciprocal of one or more trace(s)
"""
names = self.selectedDataNames()
for n in names:
self.runCommand(self.uniqueName() + " = " + "recip( "+n+" )")
def handleExponential(self):
"""
        Returns the exponential of one or more trace(s)
"""
names = self.selectedDataNames()
for n in names:
self.runCommand(self.uniqueName() + " = " + "exp( "+n+" )")
def handleAbsolute(self):
"""
Returns the absolute value of one or more trace(s)
"""
names = self.selectedDataNames()
for n in names:
self.runCommand(self.uniqueName() + " = " + "abs( "+n+" )")
def handleArctan(self):
"""
Returns the arctan of one or more trace(s)
"""
names = self.selectedDataNames()
for n in names:
self.runCommand(self.uniqueName() + " = " + "atan( "+n+" )")
def handleNlog(self):
"""
Returns the natural log of one or more trace(s)
"""
names = self.selectedDataNames()
for n in names:
self.runCommand(self.uniqueName() + " = " + "ln( "+n+" )")
def handleNorm(self):
"""
Normalises and returns one or more trace(s)
"""
names = self.selectedDataNames()
for n in names:
self.runCommand(self.uniqueName() + " = " + "norm( "+n+" )")
def handleInvert(self):
"""
Returns the inversion of one or more trace(s)
"""
names = self.selectedDataNames()
for n in names:
self.runCommand(self.uniqueName() + " = " + "inv( "+n+" )")
def handleAddCon(self):
"""
Adds a constant to and returns one or more trace(s)
"""
names = self.selectedDataNames()
for n in names:
self.runCommand(self.uniqueName() + " = " + "addcons( "+n+" )")
def handleSubCon(self):
"""
Subtracts a constant from and returns one or more trace(s)
"""
names = self.selectedDataNames()
for n in names:
self.runCommand(self.uniqueName() + " = " + "subcons( "+n+" )")
def handleMulCon(self):
"""
Multiplies by a constant and returns one or more trace(s)
"""
names = self.selectedDataNames()
for n in names:
self.runCommand(self.uniqueName() + " = " + "mulcons( "+n+" )")
def handleDivCon(self):
"""
Divides by a constant and returns one or more trace(s)
"""
names = self.selectedDataNames()
for n in names:
self.runCommand(self.uniqueName() + " = " + "divcons( "+n+" )")
def handlePowCon(self):
"""
Raises to the power of a constant and returns one or more trace(s)
"""
names = self.selectedDataNames()
for n in names:
self.runCommand(self.uniqueName() + " = " + "powcons( "+n+" )")
def handleChangeName(self):
"""
Changes the name of one or more trace(s)
"""
names = self.selectedDataNames()
for n in names:
name = user_functions.inputname()
self.runCommand(self.makeUnique(name) + " = " + "renamed( "+n+" )")
def handleChangeUnit(self):
"""
Changes the units of one or more trace(s)
"""
names = self.selectedDataNames()
for n in names:
self.runCommand(self.uniqueName() + " = " + "newunits( "+n+" )")
def handleClip(self):
"""
        Clips a signal, keeping only a specified value range
"""
names = self.selectedDataNames()
if len(names) == 0:
return
try:
# Get current data range from first variable
var = self.data[names[0]]
valmin = var.data[0]
valmax = var.data[0]
for point in var.data:
if point < valmin:
valmin = point
if point > valmax:
valmax = point
except:
return
# Use a dialog box to get value range
config = OrderedDict({"min": float(valmin), "max": float(valmax)})
c = ConfigDialog(config, self, pstvOnly=False)
c.exec_()
for n in names:
self.runCommand(self.makeUnique(n+"_clip") + " = " +
"clip("+n+", %e, %e )" % (config["min"], config["max"]))
def handleStats(self):
"""
Returns the statistics (mean, standard deviation and range) of one or more traces
"""
names = self.selectedDataNames()
if len(names) == 0:
return
for n in names:
self.runCommand("stats("+n+")")
def handleTimeOff(self):
"""
Adds a time offset to one or more traces
"""
names = self.selectedDataNames()
for n in names:
self.runCommand(self.uniqueName() + " = " + "timoff("+n+")")
########## Help menu handlers ##########
def handleAbout(self):
"""
Displays the About dialog
"""
about_box = QMessageBox()
about_box.setText(__doc__)
about_box.exec_()
##########
def runSandboxed(self, func, args=()):
# To capture print statements stdout is temporarily directed to a StringIO buffer
buffer = StringIO()
oldstdout = sys.stdout
sys.stdout = buffer
val = None
try:
val = func(*args)
except:
e = sys.exc_info()
self.write("Error: " + str(e[0]))
self.write("Reason: " + str(e[1]))
sys.stdout = oldstdout
output = buffer.getvalue()
if len(output) > 0:
self.write(output)
return val
def _runExec(self, cmd, glob, loc):
"""
This is a wrapper around exec
Needed because exec isn't allowed in a lambda or nested function
and can't be passed as a function pointer.
"""
exec(cmd, glob, loc)
def runCommand(self, cmd):
# Output the command
self.write(">>> " + cmd)
glob = globals()
glob['plot'] = self.DataPlot.plot
glob['oplot'] = self.DataPlot.oplot
glob['mplot'] = self.DataPlot.mplot
glob['plotxy'] = self.DataPlot.plotxy
glob['zplot'] = self.DataPlot.zplot
glob['contour'] = self.DataPlot.contour
glob['contourf'] = self.DataPlot.contourf
glob['clearFig'] = self.DataPlot.clearFig
glob['intg'] = calculus.integrate
glob['diff'] = calculus.differentiate
glob['fftp'] = fourier.fftp
glob['runfft'] = fourier.runfft
glob['chop'] = user_functions.chop
glob['recip'] = user_functions.reciprocal
glob['exp'] = user_functions.exponential
glob['abs'] = user_functions.absolute
glob['atan'] = user_functions.arctan
glob['ln'] = user_functions.nlog
glob['norm'] = user_functions.normalise
glob['inv'] = user_functions.invert
glob['addcons'] = user_functions.addcon
glob['subcons'] = user_functions.subcon
glob['mulcons'] = user_functions.mulcon
glob['divcons'] = user_functions.divcon
glob['powcons'] = user_functions.powcon
glob['renamed'] = user_functions.changename
glob['newunits'] = user_functions.changeunits
glob['clip'] = user_functions.clip
glob['stats'] = user_functions.statistics
glob['timoff'] = user_functions.timeOffset
# Evaluate the command, catching any exceptions
# Local scope is set to self.data to allow access to user data
self.runSandboxed(self._runExec, args=(cmd, glob, self.data))
self.updateDataTable()
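# Hedged sketch (illustrative, not part of PyXPad): runCommand injects plotting
# and analysis helpers into the globals passed to exec(), uses self.data as the
# local namespace so results are stored back as user variables, and runSandboxed
# redirects stdout to a StringIO buffer so print output can be echoed to the log.
# A standalone version of that mechanism:
import sys
from io import StringIO

def run_sandboxed(cmd, global_ns, local_ns):
    buffer = StringIO()
    old_stdout = sys.stdout
    sys.stdout = buffer
    try:
        exec(cmd, global_ns, local_ns)
    except Exception as err:            # report, never crash the caller
        print("Error:", err)
    finally:
        sys.stdout = old_stdout
    return buffer.getvalue()

user_data = {"x": 2}
print(run_sandboxed("y = x * 3; print(y)", {}, user_data), end="")  # prints 6
print(user_data["y"])  # 6 -- the result lands in the user-data namespace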
|
bendudson/pyxpad
|
pyxpad/pyxpad.py
|
Python
|
gpl-3.0
| 44,509
|
[
"NetCDF"
] |
b7500d8d146e89196674b53a4ed08d6a45431f6525857ad00ce8676e0ea521e3
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
class Vasp(MakefilePackage):
"""
The Vienna Ab initio Simulation Package (VASP)
is a computer program for atomic scale materials modelling,
e.g. electronic structure calculations
and quantum-mechanical molecular dynamics, from first principles.
"""
homepage = "http://vasp.at"
url = "file://{0}/vasp.5.4.4.pl2.tgz".format(os.getcwd())
version('5.4.4.pl2', sha256='98f75fd75399a23d76d060a6155f4416b340a1704f256a00146f89024035bc8e')
version('5.4.4', sha256='5bd2449462386f01e575f9adf629c08cb03a13142806ffb6a71309ca4431cfb3')
resource(name='vaspsol',
git='https://github.com/henniggroup/VASPsol.git',
tag='V1.0',
when='+vaspsol')
variant('scalapack', default=False,
description='Enables build with SCALAPACK')
variant('cuda', default=False,
description='Enables running on Nvidia GPUs')
variant('vaspsol', default=False,
description='Enable VASPsol implicit solvation model\n'
'https://github.com/henniggroup/VASPsol')
depends_on('rsync', type='build')
depends_on('blas')
depends_on('lapack')
depends_on('fftw')
depends_on('mpi', type=('build', 'link', 'run'))
depends_on('netlib-scalapack', when='+scalapack')
depends_on('cuda', when='+cuda')
depends_on('qd', when='%nvhpc')
conflicts('%gcc@:8', msg='GFortran before 9.x does not support all features needed to build VASP')
conflicts('+vaspsol', when='+cuda', msg='+vaspsol only available for CPU')
parallel = False
def edit(self, spec, prefix):
if '%gcc' in spec:
make_include = join_path('arch', 'makefile.include.linux_gnu')
elif '%nvhpc' in spec:
make_include = join_path('arch', 'makefile.include.linux_pgi')
filter_file('pgcc', spack_cc, make_include)
filter_file('pgc++', spack_cxx, make_include, string=True)
filter_file('pgfortran', spack_fc, make_include)
filter_file('/opt/pgi/qd-2.3.17/install/include',
spec['qd'].prefix.include, make_include)
filter_file('/opt/pgi/qd-2.3.17/install/lib',
spec['qd'].prefix.lib, make_include)
filter_file('^SCALAPACK[ ]{0,}=.*$', 'SCALAPACK ?=', make_include)
else:
make_include = join_path('arch',
'makefile.include.linux_' +
spec.compiler.name)
os.rename(make_include, 'makefile.include')
        # This bunch of 'filter_file()' calls makes these options settable
        # as environment variables
filter_file('^CPP_OPTIONS[ ]{0,}=[ ]{0,}',
'CPP_OPTIONS ?= ',
'makefile.include')
filter_file('^LIBDIR[ ]{0,}=.*$', '', 'makefile.include')
filter_file('^BLAS[ ]{0,}=.*$', 'BLAS ?=', 'makefile.include')
filter_file('^LAPACK[ ]{0,}=.*$', 'LAPACK ?=', 'makefile.include')
filter_file('^FFTW[ ]{0,}?=.*$', 'FFTW ?=', 'makefile.include')
filter_file('^MPI_INC[ ]{0,}=.*$', 'MPI_INC ?=', 'makefile.include')
filter_file('-DscaLAPACK.*$\n', '', 'makefile.include')
filter_file('^SCALAPACK*$', '', 'makefile.include')
if '+cuda' in spec:
filter_file('^OBJECTS_GPU[ ]{0,}=.*$',
'OBJECTS_GPU ?=',
'makefile.include')
filter_file('^CPP_GPU[ ]{0,}=.*$',
'CPP_GPU ?=',
'makefile.include')
filter_file('^CFLAGS[ ]{0,}=.*$',
'CFLAGS ?=',
'makefile.include')
if '+vaspsol' in spec:
copy('VASPsol/src/solvation.F', 'src/')
def setup_build_environment(self, spack_env):
spec = self.spec
cpp_options = ['-DMPI -DMPI_BLOCK=8000',
'-Duse_collective', '-DCACHE_SIZE=4000',
'-Davoidalloc', '-Duse_bse_te',
'-Dtbdyn', '-Duse_shmem']
if '%nvhpc' in self.spec:
cpp_options.extend(['-DHOST=\\"LinuxPGI\\"', '-DPGI16',
'-Dqd_emulate'])
else:
cpp_options.append('-DHOST=\\"LinuxGNU\\"')
cflags = ['-fPIC', '-DADD_']
spack_env.set('BLAS', spec['blas'].libs.ld_flags)
spack_env.set('LAPACK', spec['lapack'].libs.ld_flags)
spack_env.set('FFTW', spec['fftw'].prefix)
spack_env.set('MPI_INC', spec['mpi'].prefix.include)
if '+scalapack' in spec:
cpp_options.append('-DscaLAPACK')
spack_env.set('SCALAPACK', spec['netlib-scalapack'].libs.ld_flags)
if '+cuda' in spec:
cpp_gpu = ['-DCUDA_GPU', '-DRPROMU_CPROJ_OVERLAP',
'-DCUFFT_MIN=28', '-DUSE_PINNED_MEMORY']
objects_gpu = ['fftmpiw.o', 'fftmpi_map.o', 'fft3dlib.o',
'fftw3d_gpu.o', 'fftmpiw_gpu.o']
cflags.extend(['-DGPUSHMEM=300', '-DHAVE_CUBLAS'])
spack_env.set('CUDA_ROOT', spec['cuda'].prefix)
spack_env.set('CPP_GPU', ' '.join(cpp_gpu))
spack_env.set('OBJECTS_GPU', ' '.join(objects_gpu))
if '+vaspsol' in spec:
cpp_options.append('-Dsol_compat')
# Finally
spack_env.set('CPP_OPTIONS', ' '.join(cpp_options))
spack_env.set('CFLAGS', ' '.join(cflags))
def build(self, spec, prefix):
if '+cuda' in self.spec:
make('gpu', 'gpu_ncl')
else:
make()
def install(self, spec, prefix):
install_tree('bin/', prefix.bin)
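# Illustrative sketch (plain re.sub, not the Spack filter_file API): the edit()
# step above rewrites assignments such as 'BLAS = ...' in makefile.include into
# 'BLAS ?=' so that the values exported in setup_build_environment() take effect.
import re

makefile_line = "BLAS       = -L/usr/lib -lblas"
print(re.sub(r'^BLAS[ ]{0,}=.*$', 'BLAS ?=', makefile_line))  # -> BLAS ?=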
|
iulian787/spack
|
var/spack/repos/builtin/packages/vasp/package.py
|
Python
|
lgpl-2.1
| 5,921
|
[
"VASP"
] |
255ce39f35550953087bb869c47e492528deb31affc83eb4e7eb4b0ab130b3e0
|
from ase import Atoms
from ase.constraints import FixLinearTriatomic
from ase.calculators.acn import (ACN, m_me,
r_mec, r_cn)
from ase.md import Langevin
import ase.units as units
from ase.io import Trajectory
import numpy as np
pos = [[0, 0, -r_mec],
[0, 0, 0],
[0, 0, r_cn]]
atoms = Atoms('CCN', positions=pos)
atoms.rotate(30, 'x')
# First C of each molecule needs to have the mass of a methyl group
masses = atoms.get_masses()
masses[::3] = m_me
atoms.set_masses(masses)
# Determine side length of a box with the density of acetonitrile at 298 K
d = 0.776 / 1e24 # Density in g/Ang3 (https://pubs.acs.org/doi/10.1021/je00001a006)
L = ((masses.sum() / units.mol) / d)**(1 / 3.)
# Set up box of 27 acetonitrile molecules
atoms.set_cell((L, L, L))
atoms.center()
atoms = atoms.repeat((3, 3, 3))
atoms.set_pbc(True)
# Set constraints for rigid triatomic molecules
nm = 27
atoms.constraints = FixLinearTriatomic(
triples=[(3 * i, 3 * i + 1, 3 * i + 2)
for i in range(nm)])
tag = 'acn_27mol_300K'
atoms.calc = ACN(rc=np.min(np.diag(atoms.cell))/2)
# Create Langevin object
md = Langevin(atoms, 1 * units.fs,
temperature=300 * units.kB,
friction=0.01,
logfile=tag + '.log')
traj = Trajectory(tag + '.traj', 'w', atoms)
md.attach(traj.write, interval=1)
md.run(5000)
# Repeat box and equilibrate further
atoms.set_constraint()
atoms = atoms.repeat((2, 2, 2))
nm = 216
atoms.constraints = FixLinearTriatomic(
triples=[(3 * i, 3 * i + 1, 3 * i + 2)
for i in range(nm)])
tag = 'acn_216mol_300K'
atoms.calc = ACN(rc=np.min(np.diag(atoms.cell))/2)
# Create Langevin object
md = Langevin(atoms, 2 * units.fs,
temperature=300 * units.kB,
friction=0.01,
logfile=tag + '.log')
traj = Trajectory(tag + '.traj', 'w', atoms)
md.attach(traj.write, interval=1)
md.run(3000)
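# Worked check (illustrative, plain numbers instead of ase.units): the box
# length above follows from L**3 * d = m_molecule / N_A, i.e.
# L = ((m / N_A) / d)**(1/3). For one acetonitrile molecule (~41 g/mol) at
# d = 0.776 g/cm^3 = 0.776e-24 g/Ang^3 this gives roughly 4.4 Angstrom,
# before the 3x3x3 repeat used above.
m_molar = 41.05                 # g/mol, approximate molar mass of CH3CN
N_A = 6.02214076e23             # 1/mol
d_check = 0.776 / 1e24          # g/Ang^3
L_single = ((m_molar / N_A) / d_check) ** (1 / 3.)
print(L_single)                 # ~4.4 Angstrom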
|
miroi/open-collection
|
theoretical_chemistry/software_runs/ase/runs/acn_equil/acn_equil.py
|
Python
|
mit
| 2,006
|
[
"ASE"
] |
7b542121e60607e1f35d1bac6e193889dd7eaa43c53abc8d6d6c647f6c4d415c
|
from datetime import datetime
import logging
from django.core.urlresolvers import reverse
from django.utils import html
from django.utils.translation import ugettext as _, ugettext_noop
import json
from corehq.apps.api.es import ReportCaseES
from corehq.apps.cloudcare.api import get_cloudcare_app, get_cloudcare_form_url
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.filters.search import SearchFilter
from corehq.apps.reports.generic import ElasticProjectInspectionReport
from corehq.apps.reports.standard import CustomProjectReport, ProjectReportParametersMixin
from corehq.apps.reports.standard.cases.data_sources import CaseDisplay
from corehq.elastic import es_query
from corehq.pillows.base import restore_property_dict
from corehq.pillows.mappings.reportcase_mapping import REPORT_CASE_INDEX
from custom.succeed.reports.patient_Info import PatientInfoReport
from custom.succeed.reports import VISIT_SCHEDULE, LAST_INTERACTION_LIST, EMPTY_FIELD, \
INPUT_DATE_FORMAT, OUTPUT_DATE_FORMAT, CM_APP_UPDATE_VIEW_TASK_MODULE, CM_UPDATE_TASK, TASK_RISK_FACTOR, TASK_ACTIVITY
from custom.succeed.utils import is_succeed_admin, has_any_role, SUCCEED_CM_APPNAME, get_app_build
from casexml.apps.case.models import CommCareCase
from dimagi.utils.decorators.memoized import memoized
class PatientTaskListReportDisplay(CaseDisplay):
def __init__(self, report, case_dict):
next_visit = VISIT_SCHEDULE[0]
last_inter = None
for action in case_dict['actions']:
if action['xform_xmlns'] in LAST_INTERACTION_LIST:
last_inter = action
for visit_key, visit in enumerate(VISIT_SCHEDULE):
for key, action in enumerate(case_dict['actions']):
if visit['xmlns'] == action['xform_xmlns']:
try:
next_visit = VISIT_SCHEDULE[visit_key + 1]
del case_dict['actions'][key]
break
except IndexError:
next_visit = 'last'
self.next_visit = next_visit
if last_inter:
self.last_interaction = last_inter['date']
self.domain = report.domain
self.app_dict = get_cloudcare_app(self.domain, SUCCEED_CM_APPNAME)
self.latest_build = get_app_build(self.app_dict)
super(PatientTaskListReportDisplay, self).__init__(report, case_dict)
def get_property(self, key):
if key in self.case:
return self.case[key]
else:
return EMPTY_FIELD
def get_link(self, url, field):
if url:
return html.mark_safe("<a class='ajax_dialog' href='%s' target='_blank'>%s</a>" % (url, html.escape(field)))
else:
return "%s (bad ID format)" % self.case["indices"][0]["referenced_id"]
def get_form_url(self, app_dict, app_build_id, module_idx, form, case_id=None):
try:
module = app_dict['modules'][module_idx]
form_idx = [ix for (ix, f) in enumerate(module['forms']) if f['xmlns'] == form][0]
except IndexError:
form_idx = None
return html.escape(get_cloudcare_form_url(domain=self.domain,
app_build_id=app_build_id,
module_id=module_idx,
form_id=form_idx,
case_id=case_id) + '/enter/')
@property
@memoized
def full_name(self):
return CommCareCase.get(self.get_property("indices")[0]["referenced_id"])["full_name"]
@property
def full_name_url(self):
return html.escape(
PatientInfoReport.get_url(*[self.case["domain"]]) + "?patient_id=%s" % self.case["indices"][0]["referenced_id"])
@property
def full_name_link(self):
return self.get_link(self.full_name_url, self.full_name)
@property
def name(self):
return self.get_property("name")
@property
def name_url(self):
if self.status == "Closed":
url = reverse('case_details', args=[self.domain, self.get_property("_id")])
return url + '#!history'
else:
return self.get_form_url(self.app_dict, self.latest_build, CM_APP_UPDATE_VIEW_TASK_MODULE, CM_UPDATE_TASK, self.get_property("_id"))
@property
def name_link(self):
return self.get_link(self.name_url, self.name)
@property
def task_responsible(self):
return self.get_property("task_responsible")
@property
def case_filter(self):
filters = []
care_site = self.request_params.get('task_responsible', '')
if care_site != '':
filters.append({'term': {'task_responsible.#value': care_site.lower()}})
return {'and': filters} if filters else {}
@property
def status(self):
return self.get_property("closed") and "Closed" or "Open"
@property
def task_due(self):
rand_date = self.get_property("task_due")
if rand_date and rand_date != EMPTY_FIELD:
date = datetime.strptime(rand_date, INPUT_DATE_FORMAT)
return date.strftime(OUTPUT_DATE_FORMAT)
else:
return EMPTY_FIELD
@property
def last_modified(self):
rand_date = self.get_property("last_updated")
if rand_date and rand_date != EMPTY_FIELD:
date = datetime.strptime(rand_date, INPUT_DATE_FORMAT)
return date.strftime(OUTPUT_DATE_FORMAT)
else:
return EMPTY_FIELD
@property
def task_activity(self):
key = self.case.get("task_activity", EMPTY_FIELD)
return TASK_ACTIVITY.get(key, key)
@property
def task_risk_factor(self):
key = self.case.get("task_risk_factor", EMPTY_FIELD)
return TASK_RISK_FACTOR.get(key, key)
@property
def task_details(self):
return self.get_property("task_details")
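# Minimal sketch (format strings below are assumptions; the real
# INPUT_DATE_FORMAT and OUTPUT_DATE_FORMAT come from custom.succeed.reports):
# task_due and last_modified above simply re-render stored date strings with
# strptime/strftime, e.g.:
from datetime import datetime as _dt_example

_raw_example = "2013-05-17T00:00:00Z"
_parsed_example = _dt_example.strptime(_raw_example, "%Y-%m-%dT%H:%M:%SZ")
print(_parsed_example.strftime("%m/%d/%Y"))  # 05/17/2013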
class PatientTaskListReport(CustomProjectReport, ElasticProjectInspectionReport, ProjectReportParametersMixin):
ajax_pagination = True
name = ugettext_noop('Patient Tasks')
slug = 'patient_task_list'
default_sort = {'task_due.#value': 'asc'}
base_template_filters = 'succeed/report.html'
case_type = 'task'
fields = ['custom.succeed.fields.ResponsibleParty',
'custom.succeed.fields.PatientName',
'custom.succeed.fields.TaskStatus',
'corehq.apps.reports.standard.cases.filters.CaseSearchFilter']
@classmethod
def show_in_navigation(cls, domain=None, project=None, user=None):
return True
@property
@memoized
def rendered_report_title(self):
return self.name
@property
@memoized
def case_es(self):
return ReportCaseES(self.domain)
@property
def case_filter(self):
filters = []
care_site = self.request_params.get('care_site', '')
if care_site != '':
filters.append({'term': {'care_site.#value': care_site.lower()}})
return {'and': filters} if filters else {}
@property
def headers(self):
headers = DataTablesHeader(
DataTablesColumn(_("Patient Name"), sortable=False),
DataTablesColumn(_("Task Name"), prop_name="name"),
DataTablesColumn(_("Responsible Party"), prop_name="task_responsible", sortable=False),
DataTablesColumn(_("Status"), prop_name='status', sortable=False),
DataTablesColumn(_("Action Due"), prop_name="task_due.#value"),
DataTablesColumn(_("Last Update"), prop_name='last_updated.#value'),
DataTablesColumn(_("Task Type"), prop_name="task_activity.#value"),
DataTablesColumn(_("Associated Risk Factor"), prop_name="task_risk_factor.#value"),
DataTablesColumn(_("Details"), prop_name="task_details", sortable=False),
)
return headers
@property
@memoized
def es_results(self):
q = { "query": {
"filtered": {
"query": {
"match_all": {}
},
"filter": {
"and": [
{"term": { "domain.exact": "succeed" }},
]
}
}
},
'sort': self.get_sorting_block(),
'from': self.pagination.start if self.pagination else None,
'size': self.pagination.count if self.pagination else None,
}
search_string = SearchFilter.get_value(self.request, self.domain)
es_filters = q["query"]["filtered"]["filter"]
responsible_party = self.request_params.get('responsible_party', '')
if responsible_party != '':
if responsible_party == 'Care Manager':
es_filters["and"].append({"term": {"task_responsible.#value": "cm"}})
else:
es_filters["and"].append({"term": {"task_responsible.#value": "chw"}})
task_status = self.request_params.get('task_status', '')
if task_status != '':
if task_status == 'closed':
es_filters["and"].append({"term": {"closed": True}})
else:
es_filters["and"].append({"term": {"closed": False}})
patient_id = self.request_params.get('patient_id', '')
if patient_id != '':
es_filters["and"].append({"term": {"indices.referenced_id": patient_id}})
def _filter_gen(key, flist):
return {"terms": {
key: [item.lower() for item in flist if item]
}}
user = self.request.couch_user
if not user.is_web_user():
owner_ids = user.get_group_ids()
user_ids = [user._id]
owner_filters = _filter_gen('owner_id', owner_ids)
user_filters = _filter_gen('user_id', user_ids)
filters = filter(None, [owner_filters, user_filters])
subterms = []
subterms.append({'or': filters})
es_filters["and"].append({'and': subterms} if subterms else {})
if self.case_type:
es_filters["and"].append({"term": {"type.exact": 'task'}})
if search_string:
query_block = {"queryString": {"query": "*" + search_string + "*"}}
q["query"]["filtered"]["query"] = query_block
sorting_block = self.get_sorting_block()[0].keys()[0] if len(self.get_sorting_block()) != 0 else None
order = self.get_sorting_block()[0].values()[0] if len(self.get_sorting_block()) != 0 else None
if sorting_block == 'task_risk_factor.#value':
sort = {
"_script": {
"script":
"""
foreach(String key : task_risk_factor_list.keySet()) {
String value = _source.task_risk_factor.get('#value');
if (value == null) {
return '';
} else {
return task_risk_factor_list.get(value);
}
}
return ''
""",
"type": "string",
"params": {
"task_risk_factor_list": TASK_RISK_FACTOR
},
"order": order
}
}
q['sort'] = sort
if sorting_block == 'task_activity.#value':
sort = {
"_script": {
"script":
"""
foreach(String key : task_activity_list.keySet()) {
String value = _source.task_activity.get('#value');
if (value == null) {
return value;
} else {
return task_activity_list.get(value);
}
}
return ''
""",
"type": "string",
"params": {
"task_activity_list": TASK_ACTIVITY
},
"order": order
}
}
q['sort'] = sort
logging.info("ESlog: [%s.%s] ESquery: %s" % (self.__class__.__name__, self.domain, json.dumps(q)))
if self.pagination:
return es_query(q=q, es_url=REPORT_CASE_INDEX + '/_search', dict_only=False, start_at=self.pagination.start)
else:
return es_query(q=q, es_url=REPORT_CASE_INDEX + '/_search', dict_only=False)
@property
def get_all_rows(self):
return self.rows
@property
def rows(self):
case_displays = (PatientTaskListReportDisplay(self, restore_property_dict(self.get_case(case)))
for case in self.es_results['hits'].get('hits', []))
for disp in case_displays:
yield [
disp.full_name_link,
disp.name_link,
disp.task_responsible,
disp.status,
disp.task_due,
disp.last_modified,
disp.task_activity,
disp.task_risk_factor,
disp.task_details
]
@property
def user_filter(self):
return super(PatientTaskListReport, self).user_filter
def get_case(self, row):
if '_source' in row:
case_dict = row['_source']
else:
raise ValueError("Case object is not in search result %s" % row)
if case_dict['domain'] != self.domain:
raise Exception("case.domain != self.domain; %r and %r, respectively" % (case_dict['domain'], self.domain))
return case_dict
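# Sketch with hypothetical values (not the live es_query call): es_results above
# assembles an old-style Elasticsearch "filtered" query and then appends term
# filters for the responsible party, task status and patient. The resulting
# structure looks like this:
import json as _json_example

def _example_task_query(responsible_party=None, closed=None):
    filters = [{"term": {"domain.exact": "succeed"}},
               {"term": {"type.exact": "task"}}]
    if responsible_party is not None:
        filters.append({"term": {"task_responsible.#value": responsible_party}})
    if closed is not None:
        filters.append({"term": {"closed": closed}})
    return {"query": {"filtered": {"query": {"match_all": {}},
                                   "filter": {"and": filters}}}}

print(_json_example.dumps(_example_task_query("cm", closed=False), indent=2))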
|
puttarajubr/commcare-hq
|
custom/succeed/reports/patient_task_list.py
|
Python
|
bsd-3-clause
| 14,071
|
[
"VisIt"
] |
da13d3059c5baba56083fa76ad97d60f0513e7c50f2827f011ec77d812258f91
|
# -*- coding: utf-8 -*-
import numpy as np
from dipy.reconst.multi_voxel import multi_voxel_fit
from dipy.reconst.base import ReconstModel, ReconstFit
from dipy.reconst.cache import Cache
from scipy.special import hermite, gamma, genlaguerre
from scipy.misc import factorial, factorial2
from dipy.core.geometry import cart2sphere
from dipy.reconst.shm import real_sph_harm, sph_harm_ind_list
import dipy.reconst.dti as dti
from warnings import warn
from dipy.core.gradients import gradient_table
from ..utils.optpkg import optional_package
from dipy.core.optimize import Optimizer
cvxopt, have_cvxopt, _ = optional_package("cvxopt")
class MapmriModel(ReconstModel, Cache):
r"""Mean Apparent Propagator MRI (MAPMRI) [1]_ of the diffusion signal.
The main idea is to model the diffusion signal as a linear combination of
the continuous functions presented in [2]_ but extending it in three
dimensions.
The main difference with the SHORE proposed in [3]_ is that MAPMRI 3D
extension is provided using a set of three basis functions for the radial
part, one for the signal along x, one for y and one for z, while [3]_
uses one basis function to model the radial part and real Spherical
Harmonics to model the angular part.
From the MAPMRI coefficients is possible to use the analytical formulae
to estimate the ODF.
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
.. [2] Ozarslan E. et. al, "Simple harmonic oscillator based reconstruction
and estimation for one-dimensional q-space magnetic resonance
           (1D-SHORE)", Proc Intl Soc Mag Reson Med, vol. 16, p. 35., 2008.
.. [3] Merlet S. et. al, "Continuous diffusion signal, EAP and ODF
estimation via Compressive Sensing in diffusion MRI", Medical
Image Analysis, 2013.
.. [4] Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
using Laplacian-regularized MAP-MRI and its application to HCP
data." NeuroImage (2016).
.. [5] Cheng, J., 2014. Estimation and Processing of Ensemble Average
Propagator and Its Features in Diffusion MRI. Ph.D. Thesis.
.. [6] Hosseinbor et al. "Bessel fourier orientation reconstruction
(bfor): An analytical diffusion propagator reconstruction for hybrid
diffusion imaging and computation of q-space indices". NeuroImage
64, 2013, 650–670.
.. [7] Craven et al. "Smoothing Noisy Data with Spline Functions."
NUMER MATH 31.4 (1978): 377-403.
.. [8] Avram et al. "Clinical feasibility of using mean apparent
propagator (MAP) MRI to characterize brain tissue microstructure".
NeuroImage 2015, in press.
"""
def __init__(self,
gtab,
radial_order=6,
laplacian_regularization=True,
laplacian_weighting=0.2,
positivity_constraint=False,
pos_grid=15,
pos_radius='adaptive',
anisotropic_scaling=True,
eigenvalue_threshold=1e-04,
bval_threshold=np.inf,
dti_scale_estimation=True,
static_diffusivity=0.7e-3):
r""" Analytical and continuous modeling of the diffusion signal with
respect to the MAPMRI basis [1]_.
The main idea is to model the diffusion signal as a linear combination
of the continuous functions presented in [2]_ but extending it in three
dimensions.
The main difference with the SHORE proposed in [3]_ is that MAPMRI 3D
extension is provided using a set of three basis functions for the
radial part, one for the signal along x, one for y and one for z, while
[3]_ uses one basis function to model the radial part and real
Spherical Harmonics to model the angular part.
From the MAPMRI coefficients it is possible to estimate various
q-space indices, the PDF and the ODF.
The fitting procedure can be constrained using the positivity
constraint proposed in [1]_ and/or the laplacian regularization
proposed in [4]_.
For the estimation of q-space indices we recommend using the 'regular'
anisotropic implementation of MAPMRI. However, it has been shown that
the ODF estimation in this implementation has a bias which
'squeezes together' the ODF peaks when there is a crossing at an angle
smaller than 90 degrees [4]_. When you want to estimate ODFs for
tractography we therefore recommend using the isotropic implementation
(which is equivalent to [3]_).
The switch between isotropic and anisotropic can be easily made through
the anisotropic_scaling option.
Parameters
----------
gtab : GradientTable,
gradient directions and bvalues container class
radial_order : unsigned int,
an even integer that represents the order of the basis
laplacian_regularization: bool,
Regularize using the Laplacian of the MAP-MRI basis.
laplacian_weighting: string or scalar,
The string 'GCV' makes it use generalized cross-validation to find
the regularization weight [4]. A scalar sets the regularization
weight to that value, and an array will make it select the
optimal weight from the values in the array.
positivity_constraint : bool,
Constrain the propagator to be positive.
pos_grid : integer,
The number of points in the grid that is used in the positivity
constraint.
pos_radius : float or string,
If set to a float, the maximum distance up to which the positivity
constraint enforces positivity is that value. If set to
'adaptive', the maximum distance depends on the estimated
tissue diffusivity.
anisotropic_scaling : bool,
If True, uses the standard anisotropic MAP-MRI basis. If False,
uses the isotropic MAP-MRI basis (equal to 3D-SHORE).
eigenvalue_threshold : float,
Sets the minimum of the tensor eigenvalues in order to avoid
stability problem.
bval_threshold : float,
Sets the b-value threshold to be used in the scale factor
estimation. In order for the estimated non-Gaussianity to have
meaning, this value should be set to a lower value (b < 2000 s/mm^2)
such that the scale factors are estimated on signal points that
reasonably represent the spins at Gaussian diffusion.
dti_scale_estimation : bool,
Whether or not DTI fitting is used to estimate the isotropic scale
factor for isotropic MAP-MRI.
When set to False the algorithm presets the isotropic tissue
diffusivity to static_diffusivity. This vastly increases fitting
speed but at the cost of slightly reduced fitting quality. Can
still be used in combination with regularization and constraints.
static_diffusivity : float,
the tissue diffusivity that is used when dti_scale_estimation is
set to False. The default is that of typical white matter
D=0.7e-3 [5]_.
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
.. [2] Ozarslan E. et. al, "Simple harmonic oscillator based
reconstruction and estimation for one-dimensional q-space
magnetic resonance (1D-SHORE)", Proc Intl Soc Mag Reson Med,
vol. 16, p. 35., 2008.
.. [3] Ozarslan E. et. al, "Simple harmonic oscillator based
reconstruction and estimation for three-dimensional q-space
mri", ISMRM 2009.
.. [4] Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
using Laplacian-regularized MAP-MRI and its application to HCP
data." NeuroImage (2016).
.. [5] Merlet S. et. al, "Continuous diffusion signal, EAP and ODF
estimation via Compressive Sensing in diffusion MRI", Medical
Image Analysis, 2013.
Examples
--------
In this example, where the data, gradient table and sphere tessellation
used for reconstruction are provided, we model the diffusion signal
with respect to the SHORE basis and compute the real and analytical
ODF.
>>> from dipy.data import dsi_voxels, get_sphere
>>> data, gtab = dsi_voxels()
>>> from dipy.sims.voxel import SticksAndBall
>>> data, golden_directions = SticksAndBall(
... gtab, d=0.0015,
... S0=1, angles=[(0, 0), (90, 0)],
... fractions=[50, 50], snr=None)
>>> from dipy.reconst.mapmri import MapmriModel
>>> radial_order = 4
>>> map_model = MapmriModel(gtab, radial_order=radial_order)
>>> mapfit = map_model.fit(data)
>>> sphere = get_sphere('symmetric724')
>>> odf = mapfit.odf(sphere)
"""
self.bvals = gtab.bvals
self.bvecs = gtab.bvecs
self.gtab = gtab
if radial_order < 0 or radial_order % 2:
msg = "radial_order must be a positive, even number."
raise ValueError(msg)
self.radial_order = radial_order
self.bval_threshold = bval_threshold
self.dti_scale_estimation = dti_scale_estimation
self.laplacian_regularization = laplacian_regularization
if self.laplacian_regularization:
msg = "Laplacian Regularization weighting must be 'GCV', "
msg += "a positive float or an array of positive floats."
if isinstance(laplacian_weighting, str):
if laplacian_weighting != 'GCV':
raise ValueError(msg)
elif (isinstance(laplacian_weighting, float) or
isinstance(laplacian_weighting, np.ndarray)):
if np.sum(laplacian_weighting < 0) > 0:
raise ValueError(msg)
self.laplacian_weighting = laplacian_weighting
self.positivity_constraint = positivity_constraint
if self.positivity_constraint:
if not have_cvxopt:
raise ValueError(
'CVXOPT package needed to enforce constraints')
if not hasattr(cvxopt, 'solvers'):
raise ValueError("CVXOPT version 1.1.7 or higher required")
msg = "pos_radius must be 'adaptive' or a positive float"
if isinstance(pos_radius, str):
if pos_radius != 'adaptive':
raise ValueError(msg)
elif isinstance(pos_radius, float) or isinstance(pos_radius, int):
if pos_radius <= 0:
raise ValueError(msg)
self.constraint_grid = create_rspace(pos_grid, pos_radius)
if not anisotropic_scaling:
self.pos_K_independent = mapmri_isotropic_K_mu_independent(
radial_order, self.constraint_grid)
else:
raise ValueError(msg)
self.pos_grid = pos_grid
self.pos_radius = pos_radius
self.anisotropic_scaling = anisotropic_scaling
if (gtab.big_delta is None) or (gtab.small_delta is None):
self.tau = 1 / (4 * np.pi ** 2)
else:
self.tau = gtab.big_delta - gtab.small_delta / 3.0
self.eigenvalue_threshold = eigenvalue_threshold
self.cutoff = gtab.bvals < self.bval_threshold
gtab_cutoff = gradient_table(bvals=self.gtab.bvals[self.cutoff],
bvecs=self.gtab.bvecs[self.cutoff])
self.tenmodel = dti.TensorModel(gtab_cutoff)
if self.anisotropic_scaling:
self.ind_mat = mapmri_index_matrix(self.radial_order)
self.Bm = b_mat(self.ind_mat)
self.S_mat, self.T_mat, self.U_mat = mapmri_STU_reg_matrices(
radial_order)
else:
self.ind_mat = mapmri_isotropic_index_matrix(self.radial_order)
self.Bm = b_mat_isotropic(self.ind_mat)
self.laplacian_matrix = mapmri_isotropic_laplacian_reg_matrix(
radial_order, 1.)
qvals = np.sqrt(self.gtab.bvals / self.tau) / (2 * np.pi)
q = gtab.bvecs * qvals[:, None]
if self.dti_scale_estimation:
self.M_mu_independent = mapmri_isotropic_M_mu_independent(
self.radial_order, q)
else:
D = static_diffusivity
mumean = np.sqrt(2 * D * self.tau)
self.mu = np.array([mumean, mumean, mumean])
self.M = mapmri_isotropic_phi_matrix(radial_order, mumean, q)
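# with a preset isotropic scale (dti_scale_estimation=False), a fixed
# scalar Laplacian weight and no positivity constraint, the regularized
# solution is identical for every voxel, so the regularized
# pseudo-inverse can be precomputed once here and reused in fit()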
if (self.laplacian_regularization and
isinstance(laplacian_weighting, float) and
not positivity_constraint):
MMt = (np.dot(self.M.T, self.M) +
laplacian_weighting * mumean *
self.laplacian_matrix)
self.MMt_inv_Mt = np.dot(np.linalg.pinv(MMt), self.M.T)
@multi_voxel_fit
def fit(self, data):
errorcode = 0
tenfit = self.tenmodel.fit(data[self.cutoff])
evals = tenfit.evals
R = tenfit.evecs
evals = np.clip(evals, self.eigenvalue_threshold, evals.max())
qvals = np.sqrt(self.gtab.bvals / self.tau) / (2 * np.pi)
mu_max = max(np.sqrt(evals * 2 * self.tau)) # used for constraint
if self.anisotropic_scaling:
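# anisotropic scale factors follow from the DTI eigenvalues:
# mu_i = sqrt(2 * lambda_i * tau)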
mu = np.sqrt(evals * 2 * self.tau)
qvecs = np.dot(self.gtab.bvecs, R)
q = qvecs * qvals[:, None]
M = mapmri_phi_matrix(self.radial_order, mu, q)
else:
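# isotropic implementation: if the regularized pseudo-inverse was
# precomputed in __init__, solve directly; otherwise build the
# voxel-specific design matrix below (reusing the mu-independent part)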
try:
self.MMt_inv_Mt
lopt = self.laplacian_weighting
coef = np.dot(self.MMt_inv_Mt, data)
coef = coef / sum(coef * self.Bm)
return MapmriFit(self, coef, self.mu, R, lopt, errorcode)
except AttributeError:
try:
M = self.M
mu = self.mu
except AttributeError:
u0 = isotropic_scale_factor(evals * 2 * self.tau)
mu = np.array([u0, u0, u0])
q = self.gtab.bvecs * qvals[:, None]
M_mu_dependent = mapmri_isotropic_M_mu_dependent(
self.radial_order, mu[0], qvals)
M = M_mu_dependent * self.M_mu_independent
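# choose the Laplacian weight: generalized cross-validation ('GCV'),
# a fixed scalar, or a search over a user-supplied array of weights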
if self.laplacian_regularization:
if self.anisotropic_scaling:
laplacian_matrix = mapmri_laplacian_reg_matrix(
self.ind_mat, mu, self.S_mat, self.T_mat, self.U_mat)
else:
laplacian_matrix = self.laplacian_matrix * mu[0]
if self.laplacian_weighting == 'GCV':
try:
lopt = generalized_crossvalidation(data, M,
laplacian_matrix)
except np.linalg.linalg.LinAlgError:
# GCV failed; fall back to a default regularization weight
lopt = 0.05
errorcode = 1
elif np.isscalar(self.laplacian_weighting):
lopt = self.laplacian_weighting
else:
lopt = generalized_crossvalidation_array(
data,
M,
laplacian_matrix,
self.laplacian_weighting)
else:
lopt = 0.
laplacian_matrix = np.ones((self.ind_mat.shape[0],
self.ind_mat.shape[0]))
if self.positivity_constraint:
w_s = "The MAPMRI positivity constraint depends on CVXOPT "
w_s += "(http://cvxopt.org/). CVXOPT is licensed "
w_s += "under the GPL (see: http://cvxopt.org/copyright.html) "
w_s += "and you may be subject to this license when using the "
w_s += "positivity constraint."
warn(w_s)
if self.pos_radius == 'adaptive':
# custom constraint grid based on scale factor [Avram2015]
constraint_grid = create_rspace(self.pos_grid,
np.sqrt(5) * mu_max)
else:
constraint_grid = self.constraint_grid
if self.anisotropic_scaling:
K = mapmri_psi_matrix(self.radial_order, mu, constraint_grid)
else:
if self.pos_radius == 'adaptive':
# grid changes per voxel. Recompute entire K matrix.
K = mapmri_isotropic_psi_matrix(self.radial_order, mu[0],
constraint_grid)
else:
# grid is static. Only compute mu-dependent part of K.
K_dependent = mapmri_isotropic_K_mu_dependent(
self.radial_order, mu[0], constraint_grid)
K = K_dependent * self.pos_K_independent
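# the positivity constraint is solved as a quadratic program:
# minimize 0.5 * c^T (M^T M + lopt * L) c - (M^T E)^T c
# subject to K c >= 0 (non-negative propagator on the constraint grid)
# and M0_mean c = 1 (unit signal at b=0); Q, p, G, h, A, b below are
# the corresponding cvxopt arguments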
if isinstance(data, np.memmap):
data = np.asarray(data)
data = np.asarray(data / data[self.gtab.b0s_mask].mean())
M0 = M[self.gtab.b0s_mask, :]
M0_mean = M0.mean(0)[None, :]
Mprime = np.r_[M0_mean, M[~self.gtab.b0s_mask, :]]
Q = cvxopt.matrix(np.ascontiguousarray(
np.dot(Mprime.T, Mprime) + lopt * laplacian_matrix))
data_b0 = data[self.gtab.b0s_mask].mean()
data_single_b0 = np.r_[
data_b0, data[~self.gtab.b0s_mask]] / data_b0
p = cvxopt.matrix(np.ascontiguousarray(
-1 * np.dot(Mprime.T, data_single_b0)))
G = cvxopt.matrix(-1 * K)
h = cvxopt.matrix((1e-10) * np.ones((K.shape[0])), (K.shape[0], 1))
A = cvxopt.matrix(np.ascontiguousarray(M0_mean))
b = cvxopt.matrix(np.array([1.]))
cvxopt.solvers.options['show_progress'] = False
try:
sol = cvxopt.solvers.qp(Q, p, G, h, A, b)
coef = np.array(sol['x'])[:, 0]
except ValueError:
errorcode = 2
warn('Optimization did not find a solution')
try:
coef = np.dot(np.linalg.pinv(M), data) # least squares
except np.linalg.linalg.LinAlgError:
errorcode = 3
coef = np.zeros(M.shape[1])
return MapmriFit(self, coef, mu, R, lopt, errorcode)
else:
try:
pseudoInv = np.dot(
np.linalg.inv(np.dot(M.T, M) + lopt * laplacian_matrix),
M.T)
coef = np.dot(pseudoInv, data)
except np.linalg.linalg.LinAlgError:
errorcode = 1
coef = np.zeros(M.shape[1])
return MapmriFit(self, coef, mu, R, lopt, errorcode)
coef = coef / sum(coef * self.Bm)
return MapmriFit(self, coef, mu, R, lopt, errorcode)
class MapmriFit(ReconstFit):
def __init__(self, model, mapmri_coef, mu, R, lopt, errorcode=0):
""" Calculates diffusion properties for a single voxel
Parameters
----------
model : object,
AnalyticalModel
mapmri_coef : 1d ndarray,
mapmri coefficients
mu : array, shape (3,)
scale parameters vector for x, y and z
R : array, shape (3,3)
rotation matrix
lopt : float,
regularization weight used for laplacian regularization
errorcode : int
provides information on whether errors occurred in the fitting
of each voxel. 0 means no problem, 1 means a LinAlgError
occurred when trying to invert the design matrix. 2 means the
positivity constraint was unable to solve the problem. 3 means
that after positivity constraint failed, also matrix inversion
failed.
"""
self.model = model
self._mapmri_coef = mapmri_coef
self.gtab = model.gtab
self.radial_order = model.radial_order
self.mu = mu
self.R = R
self.lopt = lopt
self.errorcode = errorcode
@property
def mapmri_mu(self):
"""The MAPMRI scale factors
"""
return self.mu
@property
def mapmri_R(self):
"""The MAPMRI rotation matrix
"""
return self.R
@property
def mapmri_coeff(self):
"""The MAPMRI coefficients
"""
return self._mapmri_coef
def odf(self, sphere, s=2):
r""" Calculates the analytical Orientation Distribution Function (ODF)
from the signal [1]_ Eq. (32).
Parameters
----------
s : unsigned int
radial moment of the ODF
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
if self.model.anisotropic_scaling:
v_ = sphere.vertices
v = np.dot(v_, self.R)
I_s = mapmri_odf_matrix(self.radial_order, self.mu, s, v)
odf = np.dot(I_s, self._mapmri_coef)
else:
I = self.model.cache_get('ODF_matrix', key=(sphere, s))
if I is None:
I = mapmri_isotropic_odf_matrix(self.radial_order, 1,
s, sphere.vertices)
self.model.cache_set('ODF_matrix', (sphere, s), I)
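# the isotropic ODF matrix is computed once with mu = 1 and cached;
# its scale dependence factors out as the mu ** s prefactor below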
odf = self.mu[0] ** s * np.dot(I, self._mapmri_coef)
return odf
def odf_sh(self, s=2):
r""" Calculates the real analytical odf for a given discrete sphere.
Computes the design matrix of the ODF for the given sphere vertices
and radial moment [1]_ eq. (32). The radial moment s acts as a
sharpening method. The analytical equation for the spherical ODF basis
is given in [2]_ eq. (C8).
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
.. [2] Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
using Laplacian-regularized MAP-MRI and its application to HCP data."
NeuroImage (2016).
"""
if self.model.anisotropic_scaling:
msg = 'odf in spherical harmonics not yet implemented for '
msg += 'anisotropic implementation'
raise ValueError(msg)
I = self.model.cache_get('ODF_sh_matrix', key=(self.radial_order, s))
if I is None:
I = mapmri_isotropic_odf_sh_matrix(self.radial_order, 1, s)
self.model.cache_set('ODF_sh_matrix', (self.radial_order, s), I)
odf = self.mu[0] ** s * np.dot(I, self._mapmri_coef)
return odf
def rtpp(self):
r""" Calculates the analytical return to the plane probability (RTPP)
[1]_ eq. (42). The analytical formula for the isotropic MAP-MRI
basis was derived in [2]_ eq. (C11).
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
.. [2] Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
using Laplacian-regularized MAP-MRI and its application to HCP data."
NeuroImage (2016).
"""
Bm = self.model.Bm
ind_mat = self.model.ind_mat
if self.model.anisotropic_scaling:
sel = Bm > 0. # select only relevant coefficients
const = 1 / (np.sqrt(2 * np.pi) * self.mu[0])
ind_sum = (-1.0) ** (ind_mat[sel, 0] / 2.0)
rtpp_vec = const * Bm[sel] * ind_sum * self._mapmri_coef[sel]
rtpp = rtpp_vec.sum()
return rtpp
else:
rtpp_vec = np.zeros((ind_mat.shape[0]))
count = 0
for n in range(0, self.model.radial_order + 1, 2):
for j in range(1, 2 + n // 2):
l = n + 2 - 2 * j
const = (-1/2.0) ** (l/2) / np.sqrt(np.pi)
matsum = 0
for k in range(0, j):
matsum += (-1) ** k * \
binomialfloat(j + l - 0.5, j - k - 1) *\
gamma(l / 2 + k + 1 / 2.0) /\
(factorial(k) * 0.5 ** (l / 2 + 1 / 2.0 + k))
for m in range(-l, l + 1):
rtpp_vec[count] = const * matsum
count += 1
direction = np.array(self.R[:, 0], ndmin=2)
r, theta, phi = cart2sphere(direction[:, 0], direction[:, 1],
direction[:, 2])
rtpp = self._mapmri_coef * (1 / self.mu[0]) *\
rtpp_vec * real_sph_harm(ind_mat[:, 2], ind_mat[:, 1],
theta, phi)
return rtpp.sum()
def rtap(self):
r""" Calculates the analytical return to the axis probability (RTAP)
[1]_ eq. (40, 44a). The analytical formula for the isotropic MAP-MRI
basis was derived in [2]_ eq. (C11).
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
.. [2] Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
using Laplacian-regularized MAP-MRI and its application to HCP data."
NeuroImage (2016).
"""
Bm = self.model.Bm
ind_mat = self.model.ind_mat
if self.model.anisotropic_scaling:
sel = Bm > 0. # select only relevant coefficients
const = 1 / (2 * np.pi * np.prod(self.mu[1:]))
ind_sum = (-1.0) ** ((np.sum(ind_mat[sel, 1:], axis=1) / 2.0))
rtap_vec = const * Bm[sel] * ind_sum * self._mapmri_coef[sel]
rtap = np.sum(rtap_vec)
else:
rtap_vec = np.zeros((ind_mat.shape[0]))
count = 0
for n in range(0, self.model.radial_order + 1, 2):
for j in range(1, 2 + n // 2):
l = n + 2 - 2 * j
kappa = ((-1) ** (j - 1) * 2 ** (-(l + 3) / 2.0)) / np.pi
matsum = 0
for k in range(0, j):
matsum += ((-1) ** k *
binomialfloat(j + l - 0.5, j - k - 1) *
gamma((l + 1) / 2.0 + k)) /\
(factorial(k) * 0.5 ** ((l + 1) / 2.0 + k))
for m in range(-l, l + 1):
rtap_vec[count] = kappa * matsum
count += 1
rtap_vec *= 2
direction = np.array(self.R[:, 0], ndmin=2)
r, theta, phi = cart2sphere(direction[:, 0],
direction[:, 1], direction[:, 2])
rtap_vec = self._mapmri_coef * (1 / self.mu[0] ** 2) *\
rtap_vec * real_sph_harm(ind_mat[:, 2], ind_mat[:, 1],
theta, phi)
rtap = rtap_vec.sum()
return rtap
def rtop(self):
r""" Calculates the analytical return to the origin probability (RTOP)
[1]_ eq. (36, 43). The analytical formula for the isotropic MAP-MRI
basis was derived in [2]_ eq. (C11).
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
.. [2] Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
using Laplacian-regularized MAP-MRI and its application to HCP data."
NeuroImage (2016).
"""
Bm = self.model.Bm
if self.model.anisotropic_scaling:
const = 1 / (np.sqrt(8 * np.pi ** 3) * np.prod(self.mu))
ind_sum = (-1.0) ** (np.sum(self.model.ind_mat, axis=1) / 2)
rtop_vec = const * ind_sum * Bm * self._mapmri_coef
rtop = rtop_vec.sum()
else:
const = 1 / (2 * np.sqrt(2.0) * np.pi ** (3 / 2.0))
rtop_vec = const * (-1.0) ** (self.model.ind_mat[:, 0] - 1) * Bm
rtop = (1 / self.mu[0] ** 3) * rtop_vec * self._mapmri_coef
rtop = rtop.sum()
return rtop
def msd(self):
r""" Calculates the analytical Mean Squared Displacement (MSD).
It is defined as the Laplacian of the origin of the estimated signal
[1]_. The analytical formula for the MAP-MRI basis was derived in [2]_
eq. (C13, D1).
References
----------
.. [1] Cheng, J., 2014. Estimation and Processing of Ensemble Average
Propagator and Its Features in Diffusion MRI. Ph.D. Thesis.
.. [2] Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
using Laplacian-regularized MAP-MRI and its application to HCP data."
NeuroImage (2016).
"""
mu = self.mu
ind_mat = self.model.ind_mat
Bm = self.model.Bm
sel = self.model.Bm > 0. # select only relevant coefficients
mapmri_coef = self._mapmri_coef[sel]
if self.model.anisotropic_scaling:
ind_sum = np.sum(ind_mat[sel], axis=1)
nx, ny, nz = ind_mat[sel].T
numerator = (-1) ** (0.5 * (-ind_sum)) * np.pi ** (3 / 2.0) *\
((1 + 2 * nx) * mu[0] ** 2 + (1 + 2 * ny) *
mu[1] ** 2 + (1 + 2 * nz) * mu[2] ** 2)
denominator = np.sqrt(2. ** (-ind_sum) * factorial(nx) *
factorial(ny) * factorial(nz)) *\
gamma(0.5 - 0.5 * nx) * gamma(0.5 - 0.5 * ny) *\
gamma(0.5 - 0.5 * nz)
msd_vec = self._mapmri_coef[sel] * (numerator / denominator)
msd = msd_vec.sum()
else:
msd_vec = (4 * ind_mat[sel, 0] - 1) * Bm[sel]
msd = self.mu[0] ** 2 * msd_vec * mapmri_coef
msd = msd.sum()
return msd
def qiv(self):
r""" Calculates the analytical Q-space Inverse Variance (QIV).
It is defined as the inverse of the Laplacian of the origin of the
estimated propagator [1]_ eq. (22). The analytical formula for the
MAP-MRI basis was derived in [2]_ eq. (C14, D2).
References
----------
.. [1] Hosseinbor et al. "Bessel fourier orientation reconstruction
(bfor): An analytical diffusion propagator reconstruction for hybrid
diffusion imaging and computation of q-space indices". NeuroImage 64,
2013, 650–670.
.. [2] Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
using Laplacian-regularized MAP-MRI and its application to HCP data."
NeuroImage (2016).
"""
ux, uy, uz = self.mu
ind_mat = self.model.ind_mat
if self.model.anisotropic_scaling:
sel = self.model.Bm > 0 # select only relevant coefficients
nx, ny, nz = ind_mat[sel].T
numerator = 8 * np.pi ** 2 * (ux * uy * uz) ** 3 *\
np.sqrt(factorial(nx) * factorial(ny) * factorial(nz)) *\
gamma(0.5 - 0.5 * nx) * gamma(0.5 - 0.5 * ny) * \
gamma(0.5 - 0.5 * nz)
denominator = np.sqrt(2. ** (-1 + nx + ny + nz)) *\
((1 + 2 * nx) * uy ** 2 * uz ** 2 + ux ** 2 *
((1 + 2 * nz) * uy ** 2 + (1 + 2 * ny) * uz ** 2))
qiv_vec = self._mapmri_coef[sel] * (numerator / denominator)
qiv = qiv_vec.sum()
else:
sel = self.model.Bm > 0. # select only relevant coefficients
j = ind_mat[sel, 0]
qiv_vec = ((8 * (-1) ** (1 - j) *
np.sqrt(2) * np.pi ** (7 / 2.)) / ((4 * j - 1) *
self.model.Bm[sel]))
qiv = ux ** 5 * qiv_vec * self._mapmri_coef[sel]
qiv = qiv.sum()
return qiv
def ng(self):
r""" Calculates the analytical non-Gaussiannity (NG) [1]_.
For the NG to be meaningful the mapmri scale factors must be
estimated only on data representing Gaussian diffusion of spins, i.e.,
bvals smaller than about 2000 s/mm^2 [2]_.
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
.. [2] Avram et al. "Clinical feasibility of using mean apparent
propagator (MAP) MRI to characterize brain tissue microstructure".
NeuroImage 2015, in press.
"""
if self.model.bval_threshold > 2000.:
msg = 'model bval_threshold must be lower than 2000 for the '
msg += 'non_Gaussianity to be physically meaningful [2].'
warn(msg)
if not self.model.anisotropic_scaling:
msg = 'Non-Gaussianity is not defined using '
msg += 'isotropic scaling.'
raise ValueError(msg)
coef = self._mapmri_coef
return np.sqrt(1 - coef[0] ** 2 / np.sum(coef ** 2))
def ng_parallel(self):
r""" Calculates the analytical parallel non-Gaussiannity (NG) [1]_.
For the NG to be meaningful the mapmri scale factors must be
estimated only on data representing Gaussian diffusion of spins, i.e.,
bvals smaller than about 2000 s/mm^2 [2]_.
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
.. [2] Avram et al. "Clinical feasibility of using mean apparent
propagator (MAP) MRI to characterize brain tissue microstructure".
NeuroImage 2015, in press.
"""
if self.model.bval_threshold > 2000.:
msg = 'Model bval_threshold must be lower than 2000 for the '
msg += 'non_Gaussianity to be physically meaningful [2].'
warn(msg)
if not self.model.anisotropic_scaling:
msg = 'Parallel non-Gaussianity is not defined using '
msg += 'isotropic scaling.'
raise ValueError(msg)
ind_mat = self.model.ind_mat
coef = self._mapmri_coef
a_par = np.zeros_like(coef)
a0 = np.zeros_like(coef)
for i in range(coef.shape[0]):
n1, n2, n3 = ind_mat[i]
if (n2 % 2 + n3 % 2) == 0:
a_par[i] = coef[i] * (-1) ** ((n2 + n3) / 2) *\
np.sqrt(factorial(n2) * factorial(n3)) /\
(factorial2(n2) * factorial2(n3))
if n1 == 0:
a0[i] = a_par[i]
return np.sqrt(1 - np.sum(a0 ** 2) / np.sum(a_par ** 2))
def ng_perpendicular(self):
r""" Calculates the analytical perpendicular non-Gaussiannity (NG)
[1]_. For the NG to be meaningful the mapmri scale factors must be
estimated only on data representing Gaussian diffusion of spins, i.e.,
bvals smaller than about 2000 s/mm^2 [2]_.
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
.. [2] Avram et al. "Clinical feasibility of using mean apparent
propagator (MAP) MRI to characterize brain tissue microstructure".
NeuroImage 2015, in press.
"""
if self.model.bval_threshold > 2000.:
msg = 'model bval_threshold must be lower than 2000 for the '
msg += 'non_Gaussianity to be physically meaningful [2].'
warn(msg)
if not self.model.anisotropic_scaling:
msg = 'Perpendicular non-Gaussianity is not defined using '
msg += 'isotropic scaling.'
raise ValueError(msg)
ind_mat = self.model.ind_mat
coef = self._mapmri_coef
a_perp = np.zeros_like(coef)
a00 = np.zeros_like(coef)
for i in range(coef.shape[0]):
n1, n2, n3 = ind_mat[i]
if n1 % 2 == 0:
if n2 % 2 == 0 and n3 % 2 == 0:
a_perp[i] = coef[i] * (-1) ** (n1 / 2) *\
np.sqrt(factorial(n1)) / factorial2(n1)
if n2 == 0 and n3 == 0:
a00[i] = a_perp[i]
return np.sqrt(1 - np.sum(a00 ** 2) / np.sum(a_perp ** 2))
def norm_of_laplacian_signal(self):
""" Calculates the norm of the laplacian of the fitted signal [1]_.
This information could be useful to assess if the extrapolation of the
fitted signal contains spurious oscillations. A high laplacian may
indicate that these are present, and any q-space indices that
use integrals of the signal may be corrupted (e.g. RTOP, RTAP, RTPP,
QIV).
References
----------
.. [1] Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
using Laplacian-regularized MAP-MRI and its application to HCP data."
NeuroImage (2016).
"""
if self.model.anisotropic_scaling:
laplacian_matrix = mapmri_laplacian_reg_matrix(
self.model.ind_mat, self.mu,
self.model.S_mat, self.model.T_mat, self.model.U_mat)
else:
laplacian_matrix = self.mu[0] * self.model.laplacian_matrix
norm_of_laplacian = np.dot(np.dot(self._mapmri_coef, laplacian_matrix),
self._mapmri_coef)
return norm_of_laplacian
def fitted_signal(self, gtab=None):
"""
Recovers the fitted signal for the given gradient table. If no gradient
table is given it recovers the signal for the gtab of the model object.
"""
if gtab is None:
E = self.predict(self.model.gtab, S0=1.)
else:
E = self.predict(gtab, S0=1.)
return E
def predict(self, qvals_or_gtab, S0=100.):
r"""Recovers the reconstructed signal for any qvalue array or
gradient table.
"""
if isinstance(qvals_or_gtab, np.ndarray):
q = qvals_or_gtab
qvals = np.linalg.norm(q, axis=1)
else:
gtab = qvals_or_gtab
qvals = np.sqrt(gtab.bvals / self.model.tau) / (2 * np.pi)
q = qvals[:, None] * gtab.bvecs
if self.model.anisotropic_scaling:
q_rot = np.dot(q, self.R)
M = mapmri_phi_matrix(self.radial_order, self.mu, q_rot)
else:
M = mapmri_isotropic_phi_matrix(self.radial_order, self.mu[0], q)
E = S0 * np.dot(M, self._mapmri_coef)
return E
def pdf(self, r_points):
""" Diffusion propagator on a given set of real points.
If the array r_points is not writeable, intermediate
results are cached for faster recalculation.
"""
if self.model.anisotropic_scaling:
r_point_rotated = np.dot(r_points, self.R)
K = mapmri_psi_matrix(self.radial_order, self.mu, r_point_rotated)
EAP = np.dot(K, self._mapmri_coef)
else:
if not r_points.flags.writeable:
K_independent = self.model.cache_get(
'mapmri_matrix_pdf_independent', key=hash(r_points.data))
if K_independent is None:
K_independent = mapmri_isotropic_K_mu_independent(
self.radial_order, r_points)
self.model.cache_set('mapmri_matrix_pdf_independent',
hash(r_points.data), K_independent)
K_dependent = mapmri_isotropic_K_mu_dependent(
self.radial_order, self.mu[0], r_points)
K = K_dependent * K_independent
else:
K = mapmri_isotropic_psi_matrix(
self.radial_order, self.mu[0], r_points)
EAP = np.dot(K, self._mapmri_coef)
return EAP
def isotropic_scale_factor(mu_squared):
r"""Estimated isotropic scaling factor _[1] Eq. (49).
Parameters
----------
mu_squared : array, shape (N,3)
squared scale factors of mapmri basis in x, y, z
Returns
-------
u0 : float
closest isotropic scale factor for the isotropic basis
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
X, Y, Z = mu_squared
coef_array = np.array([-3, -(X + Y + Z), (X * Y + X * Z + Y * Z),
3 * X * Y * Z])
# take the real, positive root of the problem.
u0 = np.sqrt(np.real(np.roots(coef_array).max()))
return u0
def mapmri_index_matrix(radial_order):
r""" Calculates the indices for the MAPMRI [1]_ basis in x, y and z.
Parameters
----------
radial_order : unsigned int
radial order of MAPMRI basis
Returns
-------
index_matrix : array, shape (N,3)
ordering of the basis in x, y, z
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
index_matrix = []
for n in range(0, radial_order + 1, 2):
for i in range(0, n + 1):
for j in range(0, n - i + 1):
index_matrix.append([n - i - j, j, i])
return np.array(index_matrix)
def b_mat(index_matrix):
r""" Calculates the B coefficients from [1]_ Eq. (27).
Parameters
----------
index_matrix : array, shape (N,3)
ordering of the basis in x, y, z
Returns
-------
B : array, shape (N,)
B coefficients for the basis
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
B = np.zeros(index_matrix.shape[0])
for i in range(index_matrix.shape[0]):
n1, n2, n3 = index_matrix[i]
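# B is non-zero only when all three basis orders n1, n2, n3 are even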
K = int(not(n1 % 2) and not(n2 % 2) and not(n3 % 2))
B[i] = (
K * np.sqrt(factorial(n1) * factorial(n2) * factorial(n3)) /
(factorial2(n1) * factorial2(n2) * factorial2(n3))
)
return B
def b_mat_isotropic(index_matrix):
r""" Calculates the isotropic B coefficients from [1]_ Fig 8.
Parameters
----------
index_matrix : array, shape (N,3)
ordering of the isotropic basis in j, l, m
Returns
-------
B : array, shape (N,)
B coefficients for the isotropic basis
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
B = np.zeros((index_matrix.shape[0]))
for i in range(index_matrix.shape[0]):
if index_matrix[i, 1] == 0:
B[i] = genlaguerre(index_matrix[i, 0] - 1, 0.5)(0)
return B
def mapmri_phi_1d(n, q, mu):
r""" One dimensional MAPMRI basis function from [1]_ Eq. (4).
Parameters
----------
n : unsigned int
order of the basis
q : array, shape (N,)
points in the q-space in which evaluate the basis
mu : float
scale factor of the basis
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
qn = 2 * np.pi * mu * q
H = hermite(n)(qn)
i = np.complex(0, 1)
f = factorial(n)
k = i ** (-n) / np.sqrt(2 ** (n) * f)
phi = k * np.exp(- qn ** 2 / 2) * H
return phi
def mapmri_phi_matrix(radial_order, mu, q_gradients):
r"""Compute the MAPMRI phi matrix for the signal [1]_ eq. (23).
Parameters
----------
radial_order : unsigned int,
an even integer that represent the order of the basis
mu : array, shape (3,)
scale factors of the basis for x, y, z
q_gradients : array, shape (N,3)
points in the q-space in which evaluate the basis
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
ind_mat = mapmri_index_matrix(radial_order)
n_elem = ind_mat.shape[0]
n_qgrad = q_gradients.shape[0]
qx, qy, qz = q_gradients.T
mux, muy, muz = mu
Mx_storage = np.array(np.zeros((n_qgrad, radial_order + 1)),
dtype=complex)
My_storage = np.array(np.zeros((n_qgrad, radial_order + 1)),
dtype=complex)
Mz_storage = np.array(np.zeros((n_qgrad, radial_order + 1)),
dtype=complex)
M = np.zeros((n_qgrad, n_elem))
for n in range(radial_order + 1):
Mx_storage[:, n] = mapmri_phi_1d(n, qx, mux)
My_storage[:, n] = mapmri_phi_1d(n, qy, muy)
Mz_storage[:, n] = mapmri_phi_1d(n, qz, muz)
counter = 0
for nx, ny, nz in ind_mat:
M[:, counter] = (
np.real(Mx_storage[:, nx] * My_storage[:, ny] * Mz_storage[:, nz])
)
counter += 1
return M
def mapmri_psi_1d(n, x, mu):
r""" One dimensional MAPMRI propagator basis function from [1]_ Eq. (10).
Parameters
----------
n : unsigned int
order of the basis
x : array, shape (N,)
points in the r-space in which evaluate the basis
mu : float
scale factor of the basis
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
H = hermite(n)(x / mu)
f = factorial(n)
k = 1 / (np.sqrt(2 ** (n + 1) * np.pi * f) * mu)
psi = k * np.exp(- x ** 2 / (2 * mu ** 2)) * H
return psi
def mapmri_psi_matrix(radial_order, mu, rgrad):
r"""Compute the MAPMRI psi matrix for the propagator [1]_ eq. (22).
Parameters
----------
radial_order : unsigned int,
an even integer that represent the order of the basis
mu : array, shape (3,)
scale factors of the basis for x, y, z
rgrad : array, shape (N,3)
points in the r-space in which evaluate the EAP
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
ind_mat = mapmri_index_matrix(radial_order)
n_elem = ind_mat.shape[0]
n_qgrad = rgrad.shape[0]
rx, ry, rz = rgrad.T
mux, muy, muz = mu
Kx_storage = np.zeros((n_qgrad, radial_order + 1))
Ky_storage = np.zeros((n_qgrad, radial_order + 1))
Kz_storage = np.zeros((n_qgrad, radial_order + 1))
K = np.zeros((n_qgrad, n_elem))
for n in range(radial_order + 1):
Kx_storage[:, n] = mapmri_psi_1d(n, rx, mux)
Ky_storage[:, n] = mapmri_psi_1d(n, ry, muy)
Kz_storage[:, n] = mapmri_psi_1d(n, rz, muz)
counter = 0
for nx, ny, nz in ind_mat:
K[:, counter] = (
Kx_storage[:, nx] * Ky_storage[:, ny] * Kz_storage[:, nz]
)
counter += 1
return K
def mapmri_odf_matrix(radial_order, mu, s, vertices):
r"""Compute the MAPMRI ODF matrix [1]_ Eq. (33).
Parameters
----------
radial_order : unsigned int,
an even integer that represent the order of the basis
mu : array, shape (3,)
scale factors of the basis for x, y, z
s : unsigned int
radial moment of the ODF
vertices : array, shape (N,3)
points of the sphere shell in the r-space in which evaluate the ODF
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
ind_mat = mapmri_index_matrix(radial_order)
n_vert = vertices.shape[0]
n_elem = ind_mat.shape[0]
odf_mat = np.zeros((n_vert, n_elem))
mux, muy, muz = mu
# Eq, 35a
rho = 1.0 / np.sqrt((vertices[:, 0] / mux) ** 2 +
(vertices[:, 1] / muy) ** 2 +
(vertices[:, 2] / muz) ** 2)
# Eq, 35b
alpha = 2 * rho * (vertices[:, 0] / mux)
# Eq, 35c
beta = 2 * rho * (vertices[:, 1] / muy)
# Eq, 35d
gamma = 2 * rho * (vertices[:, 2] / muz)
const = rho ** (3 + s) / np.sqrt(2 ** (2 - s) * np.pi **
3 * (mux ** 2 * muy ** 2 * muz ** 2))
for j in range(n_elem):
n1, n2, n3 = ind_mat[j]
f = np.sqrt(factorial(n1) * factorial(n2) * factorial(n3))
odf_mat[:, j] = const * f * \
_odf_cfunc(n1, n2, n3, alpha, beta, gamma, s)
return odf_mat
def _odf_cfunc(n1, n2, n3, a, b, g, s):
r"""Compute the MAPMRI ODF function from [1]_ Eq. (34).
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
f = factorial
f2 = factorial2
sumc = 0
for i in range(0, n1 + 1, 2):
for j in range(0, n2 + 1, 2):
for k in range(0, n3 + 1, 2):
nn = n1 + n2 + n3 - i - j - k
gam = (-1) ** ((i + j + k) / 2.0) * gamma((3 + s + nn) / 2.0)
num1 = a ** (n1 - i)
num2 = b ** (n2 - j)
num3 = g ** (n3 - k)
num = gam * num1 * num2 * num3
denom = f(n1 - i) * f(n2 - j) * f(
n3 - k) * f2(i) * f2(j) * f2(k)
sumc += num / denom
return sumc
def mapmri_isotropic_phi_matrix(radial_order, mu, q):
r""" Three dimensional isotropic MAPMRI signal basis function from [1]_
Eq. (61).
Parameters
----------
radial_order : unsigned int,
radial order of the mapmri basis.
mu : float,
positive isotropic scale factor of the basis
q : array, shape (N,3)
points in the q-space in which evaluate the basis
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
qval, theta, phi = cart2sphere(q[:, 0], q[:, 1], q[:, 2])
theta[np.isnan(theta)] = 0
ind_mat = mapmri_isotropic_index_matrix(radial_order)
n_elem = ind_mat.shape[0]
n_qgrad = q.shape[0]
M = np.zeros((n_qgrad, n_elem))
counter = 0
for n in range(0, radial_order + 1, 2):
for j in range(1, 2 + n // 2):
l = n + 2 - 2 * j
const = mapmri_isotropic_radial_signal_basis(j, l, mu, qval)
for m in range(-l, l+1):
M[:, counter] = const * real_sph_harm(m, l, theta, phi)
counter += 1
return M
def mapmri_isotropic_radial_signal_basis(j, l, mu, qval):
r"""Radial part of the isotropic 1D-SHORE signal basis [1]_ eq. (61).
Parameters
----------
j : unsigned int,
a positive integer related to the radial order
l : unsigned int,
the spherical harmonic order
mu : float,
isotropic scale factor of the basis
qval : float,
points in the q-space in which evaluate the basis
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
pi2_mu2_q2 = 2 * np.pi ** 2 * mu ** 2 * qval ** 2
const = (
(-1) ** (l / 2) * np.sqrt(4.0 * np.pi) *
(pi2_mu2_q2) ** (l / 2) * np.exp(-pi2_mu2_q2) *
genlaguerre(j - 1, l + 0.5)(2 * pi2_mu2_q2)
)
return const
def mapmri_isotropic_M_mu_independent(radial_order, q):
r"""Computed the mu independent part of the signal design matrix.
"""
ind_mat = mapmri_isotropic_index_matrix(radial_order)
qval, theta, phi = cart2sphere(q[:, 0], q[:, 1], q[:, 2])
theta[np.isnan(theta)] = 0
n_elem = ind_mat.shape[0]
n_rgrad = theta.shape[0]
Q_mu_independent = np.zeros((n_rgrad, n_elem))
counter = 0
for n in range(0, radial_order + 1, 2):
for j in range(1, 2 + n // 2):
l = n + 2 - 2 * j
const = np.sqrt(4 * np.pi) * (-1) ** (-l / 2) * \
(2 * np.pi ** 2 * qval ** 2) ** (l / 2)
for m in range(-1 * (n + 2 - 2 * j), (n + 3 - 2 * j)):
Q_mu_independent[:, counter] = const * \
real_sph_harm(m, l, theta, phi)
counter += 1
return Q_mu_independent
def mapmri_isotropic_M_mu_dependent(radial_order, mu, qval):
'''Computes the mu-dependent part of the signal design matrix M.
'''
ind_mat = mapmri_isotropic_index_matrix(radial_order)
n_elem = ind_mat.shape[0]
n_qgrad = qval.shape[0]
Q_u0_dependent = np.zeros((n_qgrad, n_elem))
pi2q2mu2 = 2 * np.pi ** 2 * mu ** 2 * qval ** 2
counter = 0
for n in range(0, radial_order + 1, 2):
for j in range(1, 2 + n // 2):
l = n + 2 - 2 * j
const = mu ** l * np.exp(-pi2q2mu2) *\
genlaguerre(j - 1, l + 0.5)(2 * pi2q2mu2)
for m in range(-l, l + 1):
Q_u0_dependent[:, counter] = const
counter += 1
return Q_u0_dependent
def mapmri_isotropic_psi_matrix(radial_order, mu, rgrad):
r""" Three dimensional isotropic MAPMRI propagator basis function from [1]_
Eq. (61).
Parameters
----------
radial_order : unsigned int,
radial order of the mapmri basis.
mu : float,
positive isotropic scale factor of the basis
rgrad : array, shape (N,3)
points in the r-space in which evaluate the basis
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
r, theta, phi = cart2sphere(rgrad[:, 0], rgrad[:, 1],
rgrad[:, 2])
theta[np.isnan(theta)] = 0
ind_mat = mapmri_isotropic_index_matrix(radial_order)
n_elem = ind_mat.shape[0]
n_rgrad = rgrad.shape[0]
K = np.zeros((n_rgrad, n_elem))
counter = 0
for n in range(0, radial_order + 1, 2):
for j in range(1, 2 + n // 2):
l = n + 2 - 2 * j
const = mapmri_isotropic_radial_pdf_basis(j, l, mu, r)
for m in range(-l, l + 1):
K[:, counter] = const * real_sph_harm(m, l, theta, phi)
counter += 1
return K
def mapmri_isotropic_radial_pdf_basis(j, l, mu, r):
r"""Radial part of the isotropic 1D-SHORE propagator basis [1]_ eq. (61).
Parameters
----------
j : unsigned int,
a positive integer related to the radial order
l : unsigned int,
the spherical harmonic order
mu : float,
isotropic scale factor of the basis
r : float,
points in the r-space in which evaluate the basis
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
r2u2 = r ** 2 / (2 * mu ** 2)
const = (
(-1) ** (j - 1) / (np.sqrt(2) * np.pi * mu ** 3) *
r2u2 ** (l / 2) * np.exp(-r2u2) * genlaguerre(j - 1, l + 0.5)(2 * r2u2)
)
return const
def mapmri_isotropic_K_mu_independent(radial_order, rgrad):
'''Computes the mu-independent part of the propagator matrix K
(same factorization trick as with M).
'''
r, theta, phi = cart2sphere(rgrad[:, 0], rgrad[:, 1],
rgrad[:, 2])
theta[np.isnan(theta)] = 0
ind_mat = mapmri_isotropic_index_matrix(radial_order)
n_elem = ind_mat.shape[0]
n_rgrad = rgrad.shape[0]
K = np.zeros((n_rgrad, n_elem))
counter = 0
for n in range(0, radial_order + 1, 2):
for j in range(1, 2 + n // 2):
l = n + 2 - 2 * j
const = (-1) ** (j - 1) *\
(np.sqrt(2) * np.pi) ** (-1) *\
(r ** 2 / 2) ** (l / 2)
for m in range(-l, l+1):
K[:, counter] = const * real_sph_harm(m, l, theta, phi)
counter += 1
return K
def mapmri_isotropic_K_mu_dependent(radial_order, mu, rgrad):
'''Computes the mu-dependent part of the propagator matrix K
(same factorization trick as with M).
'''
r, theta, phi = cart2sphere(rgrad[:, 0], rgrad[:, 1],
rgrad[:, 2])
theta[np.isnan(theta)] = 0
ind_mat = mapmri_isotropic_index_matrix(radial_order)
n_elem = ind_mat.shape[0]
n_rgrad = rgrad.shape[0]
K = np.zeros((n_rgrad, n_elem))
r2mu2 = r ** 2 / (2 * mu ** 2)
counter = 0
for n in range(0, radial_order + 1, 2):
for j in range(1, 2 + n // 2):
l = n + 2 - 2 * j
const = (mu ** 3) ** (-1) * mu ** (-l) *\
np.exp(-r2mu2) * genlaguerre(j - 1, l + 0.5)(2 * r2mu2)
for m in range(-l, l + 1):
K[:, counter] = const
counter += 1
return K
def binomialfloat(n, k):
"""Custom Binomial function
"""
return factorial(n) / (factorial(n - k) * factorial(k))
def mapmri_isotropic_odf_matrix(radial_order, mu, s, vertices):
r"""Compute the isotropic MAPMRI ODF matrix [1]_ Eq. 32 but for the
isotropic propagator in [1]_ eq. (60). Analytical derivation in
[2]_ eq. (C8).
Parameters
----------
radial_order : unsigned int,
an even integer that represent the order of the basis
mu : float,
isotropic scale factor of the isotropic MAP-MRI basis
s : unsigned int
radial moment of the ODF
vertices : array, shape (N,3)
points of the sphere shell in the r-space in which evaluate the ODF
Returns
-------
odf_mat : Matrix, shape (N_vertices, N_mapmri_coef)
ODF design matrix to discrete sphere function
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
.. [2] Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
using Laplacian-regularized MAP-MRI and its application to HCP data."
NeuroImage (2016).
"""
r, theta, phi = cart2sphere(vertices[:, 0], vertices[:, 1],
vertices[:, 2])
theta[np.isnan(theta)] = 0
ind_mat = mapmri_isotropic_index_matrix(radial_order)
n_vert = vertices.shape[0]
n_elem = ind_mat.shape[0]
odf_mat = np.zeros((n_vert, n_elem))
counter = 0
for n in range(0, radial_order + 1, 2):
for j in range(1, 2 + n // 2):
l = n + 2 - 2 * j
kappa = ((-1) ** (j - 1) * 2 ** (-(l + 3) / 2.0) * mu ** s) / np.pi
matsum = 0
for k in range(0, j):
matsum += ((-1) ** k * binomialfloat(j + l - 0.5, j - k - 1) *
gamma((l + s + 3) / 2.0 + k)) /\
(factorial(k) * 0.5 ** ((l + s + 3) / 2.0 + k))
for m in range(-l, l + 1):
odf_mat[:, counter] = kappa * matsum *\
real_sph_harm(m, l, theta, phi)
counter += 1
return odf_mat
def mapmri_isotropic_odf_sh_matrix(radial_order, mu, s):
r"""Compute the isotropic MAPMRI ODF matrix [1]_ Eq. 32 for the isotropic
propagator in [1]_ eq. (60). Here we do not compute the sphere function but
the spherical harmonics by only integrating the radial part of the
propagator. We use the same derivation of the ODF in the isotropic
implementation as in [2]_ eq. (C8).
Parameters
----------
radial_order : unsigned int,
an even integer that represent the order of the basis
mu : float,
isotropic scale factor of the isotropic MAP-MRI basis
s : unsigned int
radial moment of the ODF
Returns
-------
odf_sh_mat : Matrix, shape (N_sh_coef, N_mapmri_coef)
ODF design matrix to spherical harmonics
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
.. [2] Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
using Laplacian-regularized MAP-MRI and its application to HCP data."
NeuroImage (2016).
"""
sh_mat = sph_harm_ind_list(radial_order)
ind_mat = mapmri_isotropic_index_matrix(radial_order)
n_elem_shore = ind_mat.shape[0]
n_elem_sh = sh_mat[0].shape[0]
odf_sh_mat = np.zeros((n_elem_sh, n_elem_shore))
counter = 0
for n in range(0, radial_order + 1, 2):
for j in range(1, 2 + n // 2):
l = n + 2 - 2 * j
kappa = ((-1) ** (j - 1) * 2 ** (-(l + 3) / 2.0) * mu ** s) / np.pi
matsum = 0
for k in range(0, j):
matsum += ((-1) ** k * binomialfloat(j + l - 0.5, j - k - 1) *
gamma((l + s + 3) / 2.0 + k)) /\
(factorial(k) * 0.5 ** ((l + s + 3) / 2.0 + k))
for m in range(-l, l + 1):
index_overlap = np.all([l == sh_mat[1], m == sh_mat[0]], 0)
odf_sh_mat[:, counter] = kappa * matsum * index_overlap
counter += 1
return odf_sh_mat
def mapmri_isotropic_laplacian_reg_matrix(radial_order, mu):
r''' Computes the Laplacian regularization matrix for MAP-MRI's isotropic
implementation [1]_ eq. (C7).
Parameters
----------
radial_order : unsigned int,
an even integer that represent the order of the basis
mu : float,
isotropic scale factor of the isotropic MAP-MRI basis
Returns
-------
LR : Matrix, shape (N_coef, N_coef)
Laplacian regularization matrix
References
----------
.. [1] Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
using Laplacian-regularized MAP-MRI and its application to HCP data."
NeuroImage (2016).
'''
ind_mat = mapmri_isotropic_index_matrix(radial_order)
n_elem = ind_mat.shape[0]
LR = np.zeros((n_elem, n_elem))
for i in range(n_elem):
for k in range(i, n_elem):
if ind_mat[i, 1] == ind_mat[k, 1] and \
ind_mat[i, 2] == ind_mat[k, 2]:
ji = ind_mat[i, 0]
jk = ind_mat[k, 0]
l = ind_mat[i, 1]
if ji == (jk + 2):
LR[i, k] = LR[k, i] = 2 ** (2 - l) * np.pi ** 2 * mu *\
gamma(5 / 2.0 + jk + l) / gamma(jk)
elif ji == (jk + 1):
LR[i, k] = LR[k, i] = 2 ** (2 - l) * np.pi ** 2 * mu *\
(-3 + 4 * ji + 2 * l) * gamma(3 / 2.0 + jk + l) /\
gamma(jk)
elif ji == jk:
LR[i, k] = 2 ** (-l) * np.pi ** 2 * mu *\
(3 + 24 * ji ** 2 + 4 * (-2 + l) *
l + 12 * ji * (-1 + 2 * l)) *\
gamma(1 / 2.0 + ji + l) / gamma(ji)
elif ji == (jk - 1):
LR[i, k] = LR[k, i] = 2 ** (2 - l) * np.pi ** 2 * mu *\
(-3 + 4 * jk + 2 * l) * gamma(3 / 2.0 + ji + l) /\
gamma(ji)
elif ji == (jk - 2):
LR[i, k] = LR[k, i] = 2 ** (2 - l) * np.pi ** 2 * mu *\
gamma(5 / 2.0 + ji + l) / gamma(ji)
return LR
def mapmri_isotropic_index_matrix(radial_order):
r""" Calculates the indices for the isotropic MAPMRI basis [1]_ Fig 8.
Parameters
----------
radial_order : unsigned int
radial order of isotropic MAPMRI basis
Returns
-------
index_matrix : array, shape (N,3)
ordering of the isotropic basis in j, l, m
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
index_matrix = []
for n in range(0, radial_order + 1, 2):
for j in range(1, 2 + n // 2):
for m in range(-1 * (n + 2 - 2 * j), (n + 3 - 2 * j)):
index_matrix.append([j, n + 2 - 2 * j, m])
return np.array(index_matrix)
def create_rspace(gridsize, radius_max):
""" Create the real space table, that contains the points in which
to compute the pdf.
Parameters
----------
gridsize : unsigned int
dimension of the propagator grid
radius_max : float
maximal radius up to which to compute the propagator
Returns
-------
tab : array, shape (N,3)
real-space points at which to calculate the pdf
"""
radius = gridsize // 2
vecs = []
for i in range(-radius, radius + 1):
for j in range(-radius, radius + 1):
for k in range(0, radius + 1):
vecs.append([i, j, k])
vecs = np.array(vecs, dtype=np.float32)
# there are points in the corners farther than sphere radius
points_inside_sphere = np.sqrt(np.einsum('ij,ij->i', vecs, vecs)) <= radius
vecs_inside_sphere = vecs[points_inside_sphere]
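# rescale the integer grid so the outermost shell lies at radius_max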
tab = vecs_inside_sphere / radius
tab = tab * radius_max
return tab
def delta(n, m):
""" Kronecker delta: 1 if n == m, 0 otherwise. """
if n == m:
return 1
return 0
def map_laplace_u(n, m):
""" S(n, m) static matrix for Laplacian regularization [1]_ eq. (13).
Parameters
----------
n, m : unsigned int
basis order of the MAP-MRI basis in different directions
Returns
-------
U : float,
Analytical integral of $\phi_n(q) * \phi_m(q)$
References
----------
.. [1] Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
using Laplacian-regularized MAP-MRI and its application to HCP data."
NeuroImage (2016).
"""
return (-1) ** n * delta(n, m) / (2 * np.sqrt(np.pi))
def map_laplace_t(n, m):
""" L(m, n) static matrix for Laplacian regularization [1]_ eq. (12).
Parameters
----------
n, m : unsigned int
basis order of the MAP-MRI basis in different directions
Returns
-------
T : float
Analytical integral of $\phi_n(q) * \phi_m''(q)$
References
----------
.. [1] Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
using Laplacian-regularized MAP-MRI and its application to HCP data."
NeuroImage (2016).
"""
a = np.sqrt((m - 1) * m) * delta(m - 2, n)
b = np.sqrt((n - 1) * n) * delta(n - 2, m)
c = (2 * n + 1) * delta(m, n)
return np.pi ** (3 / 2.) * (-1) ** (n + 1) * (a + b + c)
def map_laplace_s(n, m):
""" R(m,n) static matrix for Laplacian regularization [1]_ eq. (11).
Parameters
----------
n, m : unsigned int
basis order of the MAP-MRI basis in different directions
Returns
-------
S : float
Analytical integral of $\phi_n''(q) * \phi_m''(q)$
References
----------
.. [1] Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
using Laplacian-regularized MAP-MRI and its application to HCP data."
NeuroImage (2016).
"""
k = 2 * np.pi ** (7 / 2.) * (-1) ** (n)
a0 = 3 * (2 * n ** 2 + 2 * n + 1) * delta(n, m)
sqmn = np.sqrt(gamma(m + 1) / gamma(n + 1))
sqnm = 1 / sqmn
an2 = 2 * (2 * n + 3) * sqmn * delta(m, n + 2)
an4 = sqmn * delta(m, n + 4)
am2 = 2 * (2 * m + 3) * sqnm * delta(m + 2, n)
am4 = sqnm * delta(m + 4, n)
return k * (a0 + an2 + an4 + am2 + am4)
def mapmri_STU_reg_matrices(radial_order):
""" Generates the static portions of the Laplacian regularization matrix
according to [1]_ eq. (11, 12, 13).
Parameters
----------
radial_order : unsigned int,
an even integer that represent the order of the basis
Returns
-------
S, T, U : Matrices, shape (N_coef,N_coef)
Regularization submatrices
References
----------
.. [1] Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
using Laplacian-regularized MAP-MRI and its application to HCP data."
NeuroImage (2016).
"""
S = np.zeros((radial_order + 1, radial_order + 1))
for i in range(radial_order + 1):
for j in range(radial_order + 1):
S[i, j] = map_laplace_s(i, j)
T = np.zeros((radial_order + 1, radial_order + 1))
for i in range(radial_order + 1):
for j in range(radial_order + 1):
T[i, j] = map_laplace_t(i, j)
U = np.zeros((radial_order + 1, radial_order + 1))
for i in range(radial_order + 1):
for j in range(radial_order + 1):
U[i, j] = map_laplace_u(i, j)
return S, T, U
def mapmri_laplacian_reg_matrix(ind_mat, mu, S_mat, T_mat, U_mat):
""" Puts the Laplacian regularization matrix together [1]_ eq. (10).
The static parts in S, T and U are multiplied and divided by the
voxel-specific scale factors.
Parameters
----------
ind_mat : matrix (N_coef, 3),
Basis order matrix
mu : array, shape (3,)
scale factors of the basis for x, y, z
S, T, U : matrices, shape (N_coef,N_coef)
Regularization submatrices
Returns
-------
LR : matrix (N_coef, N_coef),
Voxel-specific Laplacian regularization matrix
References
----------
.. [1] Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
using Laplacian-regularized MAP-MRI and its application to HCP data."
NeuroImage (2016).
"""
ux, uy, uz = mu
x, y, z = ind_mat.T
n_elem = ind_mat.shape[0]
LR = np.zeros((n_elem, n_elem))
for i in range(n_elem):
for j in range(i, n_elem):
if (
(x[i] - x[j]) % 2 == 0 and
(y[i] - y[j]) % 2 == 0 and
(z[i] - z[j]) % 2 == 0
):
LR[i, j] = LR[j, i] = \
(ux ** 3 / (uy * uz)) *\
S_mat[x[i], x[j]] * U_mat[y[i], y[j]] * U_mat[z[i], z[j]] +\
(uy ** 3 / (ux * uz)) *\
S_mat[y[i], y[j]] * U_mat[z[i], z[j]] * U_mat[x[i], x[j]] +\
(uz ** 3 / (ux * uy)) *\
S_mat[z[i], z[j]] * U_mat[x[i], x[j]] * U_mat[y[i], y[j]] +\
2 * ((ux * uy) / uz) *\
T_mat[x[i], x[j]] * T_mat[y[i], y[j]] * U_mat[z[i], z[j]] +\
2 * ((ux * uz) / uy) *\
T_mat[x[i], x[j]] * T_mat[z[i], z[j]] * U_mat[y[i], y[j]] +\
2 * ((uz * uy) / ux) *\
T_mat[z[i], z[j]] * T_mat[y[i], y[j]] * U_mat[x[i], x[j]]
return LR
def generalized_crossvalidation_array(data, M, LR, weights_array=None):
"""Generalized Cross Validation Function [1]_ eq. (15).
Here weights_array is a numpy array with all values that should be
considered in the GCV. It will run through the weights until the cost
function starts to increase, then stop and take the last value as the
optimum weight.
Parameters
----------
data : array (N),
Basis order matrix
M : matrix, shape (N, Ncoef)
mapmri observation matrix
LR : matrix, shape (N_coef, N_coef)
regularization matrix
weights_array : array (N_of_weights)
array of optional regularization weights
"""
if weights_array is None:
lrange = np.linspace(0.05, 1, 20) # reasonably fast standard range
else:
lrange = weights_array
samples = lrange.shape[0]
MMt = np.dot(M.T, M)
K = len(data)
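# GCV criterion: GCV(lambda) = ||y - S y|| / (K - tr S), with hat
# matrix S = M (M^T M + lambda * LR)^-1 M^T; walk through lrange until
# the criterion stops decreasing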
gcvold = gcvnew = 10e10 # set initialization gcv threshold very high
i = -1
while gcvold >= gcvnew and i < samples - 2:
gcvold = gcvnew
i = i + 1
S = np.dot(np.dot(M, np.linalg.pinv(MMt + lrange[i] * LR)), M.T)
trS = np.matrix.trace(S)
normyytilde = np.linalg.norm(data - np.dot(S, data), 2)
gcvnew = normyytilde / (K - trS)
lopt = lrange[i - 1]
return lopt
def generalized_crossvalidation(data, M, LR, gcv_startpoint=5e-2):
"""Generalized Cross Validation Function [1]_ eq. (15).
Finds optimal regularization weight based on generalized cross-validation.
Parameters
----------
data : array (N),
data array
M : matrix, shape (N, Ncoef)
mapmri observation matrix
LR : matrix, shape (N_coef, N_coef)
regularization matrix
gcv_startpoint : float
startpoint for the gcv optimization
Returns
-------
optimal_lambda : float,
optimal regularization weight
References
----------
.. [1] Craven et al. "Smoothing Noisy Data with Spline Functions."
NUMER MATH 31.4 (1978): 377-403.
"""
MMt = np.dot(M.T, M)
K = len(data)
bounds = ((1e-5, 10),)
solver = Optimizer(fun=gcv_cost_function,
x0=(gcv_startpoint,),
args=((data, M, MMt, K, LR),),
bounds=bounds)
optimal_lambda = solver.xopt
return optimal_lambda
def gcv_cost_function(weight, args):
"""The GCV cost function that is iterated [4]
"""
data, M, MMt, K, LR = args
S = np.dot(np.dot(M, np.linalg.pinv(MMt + weight * LR)), M.T)
trS = np.matrix.trace(S)
normyytilde = np.linalg.norm(data - np.dot(S, data), 2)
gcv_value = normyytilde / (K - trS)
return gcv_value
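# --- Illustrative sketch (editor's example, not part of dipy) ---
# Demonstrates the array-based GCV search above on synthetic data; the
# observation matrix M and regularization matrix LR are toy stand-ins.
def _example_gcv_weight():
    import numpy as np
    rng = np.random.RandomState(0)
    M = rng.standard_normal((60, 10))      # toy observation matrix
    LR = np.eye(10)                        # toy regularization matrix
    data = np.dot(M, rng.standard_normal(10)) + 0.05 * rng.standard_normal(60)
    weight = generalized_crossvalidation_array(data, M, LR)
    # Regularized least-squares fit with the selected weight.
    coef = np.dot(np.linalg.pinv(np.dot(M.T, M) + weight * LR),
                  np.dot(M.T, data))
    return weight, coef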
|
villalonreina/dipy
|
dipy/reconst/mapmri.py
|
Python
|
bsd-3-clause
| 75,106
|
[
"Gaussian"
] |
cd2f2ba7ef5beabbdb51815cdebea9f458a82a2a16fe45abee88428b88cc5ee6
|
"""
A module that defines all SIESTA files known
"""
from copy import deepcopy
import sids.simulation as _sim
import sids.siesta.io as _sio
import sids.es as _es
import sids.helper.units as _unit
import sids.k as _k
import numpy as _np
import sparse as spar
class SparseMatrixError(Exception):
"""
Error handler for SIESTA sparse matrices
"""
pass
class SparseMatrix(_sim.SimulationFile):
"""
A wrapper class for the sparsity matrices in siesta
"""
def _option(self,method='dense'):
"""
        Sets specific options that determine the behaviour of this
        sparse matrix.
Parameters
----------
method -- 'dense' or 'sparse' enables choice of algorithms used
"""
self.method = method
if method == 'dense':
try:
del self.s_ptr
del self.s_col
except:
pass
elif method == 'sparse':
self.s_ptr, self.s_col = spar.sparse_uc(self.no, self.n_col, self.l_ptr,
self.l_col)
def option(self,**opt):
"""
        Enables setting of specification options.
        As this should be extendable, we expose it as a method that users
        may overwrite.
        Just remember to end your routine with "self._option(**opt)"
"""
self._option(**opt)
def _tosparse(self,k,m1,m2=None):
"""
Returns a csr sparse matrix at the specified k-point
"""
if not hasattr(self,'s_ptr'):
            raise SparseMatrixError("Sparse method has not been initialized, call self.option(method='sparse')")
# convert k-point to current cell size
tk = _k.PI2 * _np.dot(k,self.rcell.T)
if hasattr(self,'offset'):
# Using the offset method does increase performance
# Hence it will be the preferred way.
return spar.tosparse_off(tk,self.no,
self.n_col,self.l_ptr,self.l_col,
self.offset,self.s_ptr,self.s_col,m1,m2)
return spar.tosparse(tk,self.no,
self.n_col,self.l_ptr,self.l_col,
self._xij,self.s_ptr,self.s_col,m1,m2)
def _todense(self,k,m1,m2=None):
""" Returns a dense matrix of this Hamiltonian at the specified
k-point"""
# convert k-point to current cell size
tk = _k.PI2 * _np.dot(k,self.rcell.T)
if hasattr(self,'offset'):
# Using the offset method does increase performance
# Hence it will be the preferred way.
return spar.todense_off(tk,self.no,
self.n_col,self.l_ptr,self.l_col,
self.offset,m1,m2)
return spar.todense(tk,self.no,
self.n_col,self.l_ptr,self.l_col,
self._xij,m1,m2)
def _correct_sparsity(self):
"""
Corrects the xij array and utilizes offsets instead.
"""
# Correct the xij array (remove xa[j]-xa[i])
spar.xij_correct(self.na, self.xa, self.lasto,
self.no, self.n_col, self.l_ptr, self.l_col,
self._xij)
# get transfer matrix sizes
tm = spar.xij_sc(self.rcell,self.nnzs,self._xij)
# Get the integer offsets for all supercells
#ioffset = spar.get_isupercells(tm)
# The supercell offsets (in Ang)
self.offset = spar.get_supercells(self.cell, tm)
self.add_clean('offset')
# Correct list_col (create the correct supercell index)
spar.list_col_correct(self.rcell, self.no, self.nnzs,
self.l_col, self._xij, tm)
class Hamiltonian(SparseMatrix,_es.Hamiltonian):
""" A wrapper class to ease the construction of several
Hamiltonian formats.
"""
# Default units (we convert in the FORTRAN routines)
_UNITS = _unit.Units('_H','eV','_xij','Ang')
def H(self,**kwargs):
if self.method == 'dense':
return self.todense(**kwargs)
else:
return self.tosparse(**kwargs)
def tosparse(self,k=_np.zeros((3,),_np.float64),spin=0,name=None):
""" Returns a sparse matrix of this Hamiltonian at the specified
k-point"""
if name is None:
return self._tosparse(k,self._H[spin,:],self._S)
elif name == 'H':
return self._tosparse(k,self._H[spin,:])
elif name == 'S':
return self._tosparse(k,self._S)
else:
raise SparseMatrixError("Error in name")
def todense(self,k=_np.zeros((3,),_np.float64),spin=0,name=None):
""" Returns a dense matrix of this Hamiltonian at the specified
k-point"""
if name is None:
return self._todense(k,self._H[spin,:],self._S)
elif name == 'H':
return self._todense(k,self._H[spin,:])
elif name == 'S':
return self._todense(k,self._S)
else:
raise Exception("Error in name")
def init_HSxij(self,read_header,read):
""" Initialization of the HSX file data type
"""
# Initialize the Hamiltonian object
self.init_hamiltonian(ortho=False)
self.gamma,self.nspin, self.no, self.no_s, \
self.nnzs = read_header(self.file_path)
self.add_clean('gamma','nspin','no','no_s','nnzs')
self.gamma = self.gamma != 0
n_col,list_ptr,list_col,H,S,xij = \
read(fname=self.file_path,
gamma=self.gamma,
no_u=self.no,no_s=self.no_s,
maxnh=self.nnzs,nspin=self.nspin)
# Correct contiguous
self.n_col = _np.require(n_col,requirements=['C','A'])
self.add_clean('n_col')
del n_col
self.l_ptr = _np.require(list_ptr,requirements=['C','A'])
self.add_clean('l_ptr')
del list_ptr
self.l_col = _np.require(list_col,requirements=['C','A']) - 1 # correct numpy indices
self.add_clean('l_col')
del list_col
self._H = _np.require(H.T,requirements=['C','A'])
self.add_clean('_H')
del H
self._H.shape = (self.nspin,self.nnzs)
self._S = _np.require(S,requirements=['C','A'])
self.add_clean('_S')
del S
self._xij = _np.require(xij.T,requirements=['C','A'])
self.add_clean('_xij')
del xij
self._xij.shape = (self.nnzs,3)
# Done reading in information
# If the simulation has the cell attached,
# fetch it and calculate the reciprocal cell
try:
self.rcell = _np.linalg.inv(self.cell)
self.add_clean('rcell')
except: pass
class HSX(Hamiltonian):
""" The HSX file that contains the Hamiltonian, overlap and
xij
"""
def init_file(self):
""" Initialization of the HSX file data type
"""
self.init_HSxij(_sio.read_hsx_header,_sio.read_hsx)
class SE_HSX(_es.SelfEnergy,HSX):
"""
A self-energy construct from the HSX file
"""
def init_file(self):
super(HSX, self).init_file()
self.init_SE()
class HS(Hamiltonian):
""" The HS file that contains the Hamiltonian, overlap and
xij
"""
def init_file(self):
""" Initialization of the HS file data type
"""
self.init_HSxij(_sio.read_hs_header,_sio.read_hs)
class SE_HS(_es.SelfEnergy,HS):
"""
A self-energy construct from the HS file
"""
def init_file(self):
super(HS, self).init_file()
self.init_SE()
class TSHS(Hamiltonian):
""" The TSHS file that contains the Hamiltonian, overlap and
xij
"""
_UNITS = _unit.Units('H','eV','cell','Ang','xa','Ang','Ef','eV')
def init_file(self):
""" Initialization of the TSHS file data type
"""
self.init_HSxij(_sio.read_tshs_header,_sio.read_tshs)
# Read extra information contained in TSHS
self.na, cell, self.Ef, self.Qtot, self.T = \
_sio.read_tshs_header_extra(self.file_path)
self.add_clean('na')
self.cell = _np.require(cell.T,requirements=['C','A'])
self.cell.shape = (3,3)
self.add_clean('cell')
del cell
self.rcell = _np.linalg.inv(self.cell)
self.add_clean('rcell')
# We add the cell size to the simulation
self.sim.add_var('na',self.na)
self.sim.add_var('cell',self.cell,self._units.unit('cell'))
self.lasto, xa = \
_sio.read_tshs_extra(self.file_path,na_u=self.na)
self.add_clean('lasto','xa')
# Create the lasto array
self.sim.add_var('lasto',self.lasto)
# Convert xa to C-array
self.xa = _np.require(xa.T,requirements=['C','A'])
self.xa.shape = (self.na,3)
del xa
# Create the coordinate
self.sim.add_var('xa',self.xa,self._units.unit('xa'))
# create offsets
self._correct_sparsity()
# Remove unneeded xij array.
if 'offset' in self.__dict__:
del self._xij
self.remove_clean('_xij')
class SE_TSHS(_es.SelfEnergy,TSHS):
"""
    A self-energy construct from the TSHS file
"""
pass
class DensityMatrix(SparseMatrix):
""" A wrapper class to ease the construction of several
Hamiltonian formats.
"""
def todense(self,k=_np.zeros((3,),_np.float64),spin=0,name='D'):
""" Returns a dense matrix of this Hamiltonian at the specified
k-point"""
if name is None:
return self._todense(k,self.DM[spin,:],self.EM)
elif name in ['DM','D']:
return self._todense(k,self.DM[spin,:])
elif name in ['EM','E']:
return self._todense(k,self.EM)
else:
raise Exception("Error in name")
class DM(DensityMatrix):
"""The density matrix file
"""
def init_file(self):
""" Initialization of the DM file data type
"""
self.nspin, self.no, self.nnzs = _sio.read_dm_header(self.file_path)
n_col,list_ptr,list_col,DM = \
_sio.read_dm(fname=self.file_path,
no_u=self.no, maxnd=self.nnzs,nspin=self.nspin)
self.add_clean('nspin','no','nnzs')
# Correct contiguous
self.n_col = _np.require(n_col,requirements=['C','A'])
self.add_clean('n_col')
del n_col
self.l_ptr = _np.require(list_ptr,requirements=['C','A'])
self.add_clean('l_ptr')
del list_ptr
self.l_col = _np.require(list_col,requirements=['C','A']) - 1 # correct numpy indices
self.add_clean('l_col')
del list_col
self.DM = _np.require(DM.T,requirements=['C','A'])
self.add_clean('DM')
del DM
self.DM.shape = (self.nspin,self.nnzs)
# Done reading in information
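# --- Illustrative sketch (editor's example, not part of this module) ---
# Intended use of the Hamiltonian API above, given an already-initialized
# TSHS object; construction of the SimulationFile itself is outside the
# scope of this sketch.
def _example_gamma_point(tshs):
    """Return dense H and S at the Gamma point using the dense backend."""
    tshs.option(method='dense')
    k = _np.zeros((3,), _np.float64)
    return tshs.todense(k=k, name='H'), tshs.todense(k=k, name='S')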
|
zerothi/siesta-es
|
sids/siesta/files.py
|
Python
|
gpl-3.0
| 10,999
|
[
"SIESTA"
] |
66a0fa5ec2430a94f494dfb9b24e5c8f1de5f93f9daf101f46d515f8fdaea362
|
class Solution:
def shortestPathLength(self, graph: list[list[int]]) -> int:
memo, final, steps = set(), (1 << len(graph)) - 1, 0
queue = [(i, 1 << i) for i in range(len(graph))]
while True:
new = []
for node, state in queue:
if state == final:
return steps
for v in graph[node]:
if (v, state | 1 << v) not in memo:
new.append((v, state | 1 << v))
memo.add((v, state | 1 << v))
queue = new
steps += 1
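# Note on the approach above (editor's comment): the solution performs a
# breadth-first search over (node, visited-mask) states.  Bit i of the mask
# records that node i has been visited, `final` is the all-ones mask, and the
# first BFS level at which any state equals `final` is the length of the
# shortest walk visiting every node.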
# TESTS
for graph, expected in [
([[1, 2, 3], [0], [0], [0]], 4),
([[1], [0, 2, 4], [1, 3, 4], [2], [1, 2]], 4),
]:
sol = Solution()
actual = sol.shortestPathLength(graph)
print("Shortest path to visit all nodes in", graph, "->", actual)
assert actual == expected
|
l33tdaima/l33tdaima
|
p847h/shorest_path_length.py
|
Python
|
mit
| 939
|
[
"VisIt"
] |
4d0902372c170bd4cf751570d6d3332af7583b6be0636465e14875e5ac30396a
|
"""Install next gen sequencing analysis tools not currently packaged.
"""
import os
import re
from fabric.api import *
from fabric.contrib.files import *
import yaml
from shared import (_if_not_installed, _make_tmp_dir,
_get_install, _get_install_local, _make_copy, _configure_make,
_java_install, _python_cmd,
_symlinked_java_version_dir, _fetch_and_unpack, _python_make,
_get_lib_dir, _get_include_dir, _apply_patch)
from cloudbio.custom import shared, versioncheck
from cloudbio import libraries
from cloudbio.flavor.config import get_config_file
@_if_not_installed(["twoBitToFa", "gtfToGenePred"])
def install_ucsc_tools(env):
"""Useful executables from UCSC.
todo: install from source to handle 32bit and get more programs
http://hgdownload.cse.ucsc.edu/admin/jksrc.zip
"""
tools = ["liftOver", "faToTwoBit", "bedToBigBed",
"bigBedInfo", "bigBedSummary", "bigBedToBed",
"bedGraphToBigWig", "bigWigInfo", "bigWigSummary",
"bigWigToBedGraph", "bigWigToWig",
"fetchChromSizes", "wigToBigWig", "faSize", "twoBitInfo",
"twoBitToFa", "faCount", "gtfToGenePred"]
url = "http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/"
_download_executables(env, url, tools)
@_if_not_installed("blat")
def install_kent_tools(env):
"""
Please note that the Blat source and executables are freely available for
academic, nonprofit and personal use. Commercial licensing information is
available on the Kent Informatics website (http://www.kentinformatics.com/).
"""
tools = ["blat", "gfClient", "gfServer"]
url = "http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/blat/"
_download_executables(env, url, tools)
def _download_executables(env, base_url, tools):
install_dir = shared._get_bin_dir(env)
with _make_tmp_dir() as work_dir:
with cd(work_dir):
for tool in tools:
final_tool = os.path.join(install_dir, tool)
if not env.safe_exists(final_tool) and shared._executable_not_on_path(tool):
shared._remote_fetch(env, "%s%s" % (base_url, tool))
env.safe_sudo("cp -f %s %s" % (tool, install_dir))
final_path = os.path.join(install_dir, tool)
env.safe_sudo("chmod uga+rx %s" % final_path)
# --- Alignment tools
def install_featurecounts(env):
"""
featureCounts from the subread package for counting reads mapping to
genomic features
"""
default_version = "1.4.4"
version = env.get("tool_version", default_version)
if versioncheck.up_to_date(env, "featureCounts", version, stdout_flag="Version"):
return
platform = "MacOS" if env.distribution == "macosx" else "Linux"
url = ("http://downloads.sourceforge.net/project/subread/"
"subread-%s/subread-%s-%s-x86_64.tar.gz"
% (version, version, platform))
_get_install(url, env, _make_copy("find . -type f -perm -100 -name 'featureCounts'",
do_make=False))
@_if_not_installed("bowtie")
def install_bowtie(env):
"""The bowtie short read aligner.
http://bowtie-bio.sourceforge.net/index.shtml
"""
default_version = "1.0.0"
version = env.get("tool_version", default_version)
url = "http://downloads.sourceforge.net/project/bowtie-bio/bowtie/%s/" \
"bowtie-%s-src.zip" % (version, version)
_get_install(url, env, _make_copy("find . -perm -100 -name 'bowtie*'"))
@_if_not_installed("bowtie2")
def install_bowtie2(env):
"""bowtie2 short read aligner, with gap support.
http://bowtie-bio.sourceforge.net/bowtie2/index.shtml
"""
default_version = "2.1.0"
version = env.get("tool_version", default_version)
url = "http://downloads.sourceforge.net/project/bowtie-bio/bowtie2/%s/" \
"bowtie2-%s-source.zip" % (version, version)
_get_install(url, env, _make_copy("find . -perm -100 -name 'bowtie2*'"))
@_if_not_installed("bfast")
def install_bfast(env):
"""BFAST: Blat-like Fast Accurate Search Tool.
http://sourceforge.net/apps/mediawiki/bfast/index.php?title=Main_Page
"""
default_version = "0.7.0a"
version = env.get("tool_version", default_version)
major_version_regex = "\d+\.\d+\.\d+"
major_version = re.search(major_version_regex, version).group(0)
url = "http://downloads.sourceforge.net/project/bfast/bfast/%s/bfast-%s.tar.gz"\
% (major_version, version)
_get_install(url, env, _configure_make)
@_if_not_installed("perm")
def install_perm(env):
"""Efficient mapping of short sequences accomplished with periodic full sensitive spaced seeds.
https://code.google.com/p/perm/
"""
default_version = "4"
version = env.get("tool_version", default_version)
url = "http://perm.googlecode.com/files/PerM%sSource.tar.gz" % version
def gcc44_makefile_patch():
gcc_cmd = "g++44"
with settings(hide('warnings', 'running', 'stdout', 'stderr'),
warn_only=True):
result = env.safe_run("%s -v" % gcc_cmd)
print result.return_code
if result.return_code == 0:
env.safe_sed("makefile", "g\+\+", gcc_cmd)
_get_install(url, env, _make_copy("ls -1 perm", gcc44_makefile_patch))
@_if_not_installed("snap")
def install_snap(env):
"""Scalable Nucleotide Alignment Program
http://snap.cs.berkeley.edu/
"""
version = "0.15"
url = "http://github.com/downloads/amplab/snap/" \
"snap-%s-linux.tar.gz" % version
_get_install(url, env, _make_copy("find . -perm -100 -type f", do_make=False))
def install_stampy(env):
"""Stampy: mapping of short reads from illumina sequencing machines onto a reference genome.
http://www.well.ox.ac.uk/project-stampy
"""
version = "1.0.21"
#version = base_version
#revision = "1654"
#version = "{0}r{1}".format(base_version, revision)
#url = "http://www.well.ox.ac.uk/bioinformatics/Software/" \
# "stampy-%s.tgz" % (version)
# Ugh -- Stampy now uses a 'Stampy-latest' download target
url = "http://www.well.ox.ac.uk/bioinformatics/Software/" \
"Stampy-latest.tgz"
def _clean_makefile(env):
env.safe_sed("makefile", " -Wl", "")
_get_install_local(url, env, _make_copy(),
dir_name="stampy-{0}".format(version),
post_unpack_fn=_clean_makefile)
@_if_not_installed("gmap")
def install_gmap(env):
"""GMAP and GSNAP: A Genomic Mapping and Alignment Program for mRNA EST and short reads.
http://research-pub.gene.com/gmap/
"""
version = "2012-11-09"
url = "http://research-pub.gene.com/gmap/src/gmap-gsnap-%s.tar.gz" % version
_get_install(url, env, _configure_make)
def _wget_with_cookies(ref_url, dl_url):
env.safe_run("wget --cookies=on --keep-session-cookies --save-cookies=cookie.txt %s"
% (ref_url))
env.safe_run("wget --referer=%s --cookies=on --load-cookies=cookie.txt "
"--keep-session-cookies --save-cookies=cookie.txt %s" %
(ref_url, dl_url))
@_if_not_installed("novoalign")
def install_novoalign(env):
"""Novoalign short read aligner using Needleman-Wunsch algorithm with affine gap penalties.
http://www.novocraft.com/main/index.php
"""
base_version = "V3.00.02"
cs_version = "V1.03.02"
_url = "http://www.novocraft.com/downloads/%s/" % base_version
ref_url = "http://www.novocraft.com/main/downloadpage.php"
base_url = "%s/novocraft%s.gcc.tar.gz" % (_url, base_version)
cs_url = "%s/novoalignCS%s.gcc.tar.gz" % (_url, cs_version)
install_dir = shared._get_bin_dir(env)
with _make_tmp_dir() as work_dir:
with cd(work_dir):
_wget_with_cookies(ref_url, base_url)
env.safe_run("tar -xzvpf novocraft%s.gcc.tar.gz" % base_version)
with cd("novocraft"):
for fname in ["isnovoindex", "novo2maq", "novo2paf",
"novo2sam.pl", "novoalign", "novobarcode",
"novoindex", "novope2bed.pl", "novorun.pl",
"novoutil"]:
env.safe_sudo("mv %s %s" % (fname, install_dir))
with _make_tmp_dir() as work_dir:
with cd(work_dir):
_wget_with_cookies(ref_url, cs_url)
env.safe_run("tar -xzvpf novoalignCS%s.gcc.tar.gz" % cs_version)
with cd("novoalignCS"):
for fname in ["novoalignCS"]:
env.safe_sudo("mv %s %s" % (fname, install_dir))
@_if_not_installed("novosort")
def install_novosort(env):
"""Multithreaded sort and merge for BAM files.
http://www.novocraft.com/wiki/tiki-index.php?page=Novosort
"""
base_version = "V3.00.02"
version = "V1.00.02"
url = "http://www.novocraft.com/downloads/%s/novosort%s.gcc.tar.gz" % (base_version, version)
ref_url = "http://www.novocraft.com/main/downloadpage.php"
install_dir = shared._get_bin_dir(env)
with _make_tmp_dir() as work_dir:
with cd(work_dir):
_wget_with_cookies(ref_url, url)
env.safe_run("tar -xzvpf novosort%s.gcc.tar.gz" % version)
with cd("novosort"):
for fname in ["novosort"]:
env.safe_sudo("mv %s %s" % (fname, install_dir))
@_if_not_installed("lastz")
def install_lastz(env):
"""LASTZ sequence alignment program.
http://www.bx.psu.edu/miller_lab/dist/README.lastz-1.02.00/README.lastz-1.02.00a.html
"""
default_version = "1.02.00"
version = env.get("tool_version", default_version)
url = "http://www.bx.psu.edu/miller_lab/dist/" \
"lastz-%s.tar.gz" % version
def _remove_werror(env):
env.safe_sed("src/Makefile", " -Werror", "")
_get_install(url, env, _make_copy("find . -perm -100 -name 'lastz'"),
post_unpack_fn=_remove_werror)
@_if_not_installed("MosaikAligner")
def install_mosaik(env):
"""MOSAIK: reference-guided aligner for next-generation sequencing technologies
http://code.google.com/p/mosaik-aligner/
"""
version = "2.1.73"
url = "http://mosaik-aligner.googlecode.com/files/" \
"MOSAIK-%s-binary.tar" % version
_get_install(url, env, _make_copy("find . -perm -100 -type f", do_make=False))
# --- Utilities
def install_samtools(env):
"""SAM Tools provide various utilities for manipulating alignments in the SAM format.
http://samtools.sourceforge.net/
"""
default_version = "0.1.19"
version = env.get("tool_version", default_version)
if versioncheck.up_to_date(env, "samtools", version, stdout_flag="Version:"):
env.logger.info("samtools version {0} is up to date; not installing"
.format(version))
return
url = "http://downloads.sourceforge.net/project/samtools/samtools/" \
"%s/samtools-%s.tar.bz2" % (version, version)
def _safe_ncurses_make(env):
"""Combine samtools, removing ncurses refs if not present on system.
"""
with settings(warn_only=True):
result = env.safe_run("make")
# no ncurses, fix Makefile and rebuild
if result.failed:
env.safe_sed("Makefile", "-D_CURSES_LIB=1", "-D_CURSES_LIB=0")
env.safe_sed("Makefile", "-lcurses", "# -lcurses")
env.safe_run("make clean")
env.safe_run("make")
install_dir = shared._get_bin_dir(env)
for fname in env.safe_run_output("ls -1 samtools bcftools/bcftools bcftools/vcfutils.pl misc/wgsim").split("\n"):
env.safe_sudo("cp -f %s %s" % (fname.rstrip("\r"), install_dir))
_get_install(url, env, _safe_ncurses_make)
def install_gemini(env):
"""A lightweight db framework for disease and population genetics.
https://github.com/arq5x/gemini
"""
version = "0.7.0"
if versioncheck.up_to_date(env, "gemini -v", version, stdout_flag="gemini"):
return
elif not shared._executable_not_on_path("gemini -v"):
env.safe_run("gemini update")
else:
iurl = "https://raw.github.com/arq5x/gemini/master/gemini/scripts/gemini_install.py"
data_dir = os.path.join(env.system_install,
"local" if env.system_install.find("/local") == -1 else "",
"share", "gemini")
with _make_tmp_dir(ext="-gemini") as work_dir:
with cd(work_dir):
if env.safe_exists(os.path.basename(iurl)):
env.safe_run("rm -f %s" % os.path.basename(iurl))
installer = shared._remote_fetch(env, iurl)
env.safe_run("%s %s %s %s %s" %
(_python_cmd(env), installer, "" if env.use_sudo else "--nosudo",
env.system_install, data_dir))
env.safe_run("rm -f gemini_install.py")
@_if_not_installed("vtools")
def install_varianttools(env):
"""Annotation, selection, and analysis of variants in the context of next-gen sequencing analysis.
http://varianttools.sourceforge.net/
"""
version = "1.0.6"
url = "http://downloads.sourceforge.net/project/varianttools/" \
"{ver}/variant_tools-{ver}-src.tar.gz".format(ver=version)
_get_install(url, env, _python_make)
@_if_not_installed("dwgsim")
def install_dwgsim(env):
"""DWGSIM: simulating NGS data and evaluating mappings and variant calling.
http://sourceforge.net/apps/mediawiki/dnaa/index.php?title=Main_Page
"""
version = "0.1.10"
samtools_version = "0.1.18"
url = "http://downloads.sourceforge.net/project/dnaa/dwgsim/" \
"dwgsim-{0}.tar.gz".format(version)
samtools_url = "http://downloads.sourceforge.net/project/samtools/samtools/" \
"{ver}/samtools-{ver}.tar.bz2".format(ver=samtools_version)
def _get_samtools(env):
shared._remote_fetch(env, samtools_url)
env.safe_run("tar jxf samtools-{0}.tar.bz2".format(samtools_version))
env.safe_run("ln -s samtools-{0} samtools".format(samtools_version))
_get_install(url, env, _make_copy("ls -1 dwgsim dwgsim_eval scripts/dwgsim_pileup_eval.pl"),
post_unpack_fn=_get_samtools)
@_if_not_installed("fastq_screen")
def install_fastq_screen(env):
"""A screening application for high througput sequence data.
http://www.bioinformatics.babraham.ac.uk/projects/fastq_screen/
"""
version = "0.4"
url = "http://www.bioinformatics.babraham.ac.uk/projects/fastq_screen/" \
"fastq_screen_v%s.tar.gz" % version
install_dir = shared._symlinked_shared_dir("fastqc_screen", version, env)
executable = "fastq_screen"
if install_dir:
with _make_tmp_dir() as work_dir:
with cd(work_dir):
out_file = shared._remote_fetch(env, url)
env.safe_run("tar -xzvpf %s" % out_file)
with cd("fastq_screen_v%s" % version):
env.safe_sudo("mv * %s" % install_dir)
env.safe_sudo("ln -s %s/%s %s/bin/%s" % (install_dir, executable,
env.system_install, executable))
def install_bedtools(env):
"""A flexible suite of utilities for comparing genomic features.
https://code.google.com/p/bedtools/
"""
version = "2.17.0"
if versioncheck.up_to_date(env, "bedtools --version", version, stdout_flag="bedtools"):
return
url = "https://bedtools.googlecode.com/files/" \
"BEDTools.v%s.tar.gz" % version
_get_install(url, env, _make_copy("ls -1 bin/*"))
_shrec_run = """
#!/usr/bin/perl
use warnings;
use strict;
use FindBin qw($RealBin);
use Getopt::Long;
my @java_args;
my @args;
foreach (@ARGV) {
if (/^\-X/) {push @java_args,$_;}
else {push @args,$_;}}
system("java -cp $RealBin @java_args Shrec @args");
"""
@_if_not_installed("shrec")
def install_shrec(env):
"""Shrec is a bioinformatics tool for error correction of HTS read data.
http://sourceforge.net/projects/shrec-ec/
"""
version = "2.2"
url = "http://downloads.sourceforge.net/project/shrec-ec/SHREC%%20%s/bin.zip" % version
install_dir = _symlinked_java_version_dir("shrec", version, env)
if install_dir:
shrec_script = "%s/shrec" % install_dir
with _make_tmp_dir() as work_dir:
with cd(work_dir):
out_file = shared._remote_fetch(env, url)
env.safe_run("unzip %s" % out_file)
env.safe_sudo("mv *.class %s" % install_dir)
for line in _shrec_run.split("\n"):
if line.strip():
env.safe_append(shrec_script, line, use_sudo=env.use_sudo)
env.safe_sudo("chmod a+rwx %s" % shrec_script)
env.safe_sudo("ln -s %s %s/bin/shrec" % (shrec_script, env.system_install))
def install_echo(env):
"""ECHO: A reference-free short-read error correction algorithm
http://uc-echo.sourceforge.net/
"""
version = "1_12"
url = "http://downloads.sourceforge.net/project/uc-echo/source%20release/" \
"echo_v{0}.tgz".format(version)
_get_install_local(url, env, _make_copy())
# -- Analysis
def install_picard(env):
"""Command-line utilities that manipulate BAM files with a Java API.
http://picard.sourceforge.net/
"""
version = "1.96"
url = "http://downloads.sourceforge.net/project/picard/" \
"picard-tools/%s/picard-tools-%s.zip" % (version, version)
_java_install("picard", version, url, env)
def install_alientrimmer(env):
"""
Adapter removal tool
http://www.ncbi.nlm.nih.gov/pubmed/23912058
"""
version = "0.3.2"
url = ("ftp://ftp.pasteur.fr/pub/gensoft/projects/AlienTrimmer/"
"AlienTrimmer_%s.tar.gz" % version)
_java_install("AlienTrimmer", version, url, env)
def install_rnaseqc(env):
"""Quality control metrics for RNA-seq data
https://www.broadinstitute.org/cancer/cga/rna-seqc
"""
version = "1.1.7"
url = ("https://github.com/chapmanb/RNA-SeQC/releases/download/"
"v%s/RNA-SeQC_v%s.jar" % (version, version))
install_dir = _symlinked_java_version_dir("RNA-SeQC", version, env)
if install_dir:
with _make_tmp_dir() as work_dir:
with cd(work_dir):
out_file = shared._remote_fetch(env, url)
env.safe_sudo("mv %s %s" % (out_file, install_dir))
def install_varscan(env):
"""Variant detection in massively parallel sequencing data
http://varscan.sourceforge.net/
"""
version = "2.3.7"
url = "http://downloads.sourceforge.net/project/varscan/VarScan.v%s.jar" % version
install_dir = _symlinked_java_version_dir("varscan", version, env)
if install_dir:
with _make_tmp_dir() as work_dir:
with cd(work_dir):
out_file = shared._remote_fetch(env, url)
env.safe_sudo("mv %s %s" % (out_file, install_dir))
def install_mutect(env):
version = "1.1.5"
url = "https://github.com/broadinstitute/mutect/releases/download/" \
"%s/muTect-%s-bin.zip" % (version, version)
install_dir = _symlinked_java_version_dir("mutect", version, env)
if install_dir:
with _make_tmp_dir() as work_dir:
with cd(work_dir):
out_file = shared._remote_fetch(env, url)
env.safe_run("unzip %s" % out_file)
env.safe_sudo("mv *.jar version.txt LICENSE* %s" % install_dir)
@_if_not_installed("bam")
def install_bamutil(env):
"""Utilities for working with BAM files, from U of M Center for Statistical Genetics.
http://genome.sph.umich.edu/wiki/BamUtil
"""
version = "1.0.7"
url = "http://genome.sph.umich.edu/w/images/5/5d/BamUtilLibStatGen.%s.tgz" % version
_get_install(url, env, _make_copy("ls -1 bamUtil/bin/bam"),
dir_name="bamUtil_%s" % version)
@_if_not_installed("tabix")
def install_tabix(env):
"""Generic indexer for TAB-delimited genome position files
http://samtools.sourceforge.net/tabix.shtml
"""
version = "0.2.6"
url = "http://downloads.sourceforge.net/project/samtools/tabix/tabix-%s.tar.bz2" % version
_get_install(url, env, _make_copy("ls -1 tabix bgzip"))
@_if_not_installed("disambiguate.py")
def install_disambiguate(env):
"""a tool for disambiguating reads aligning to multiple genomes
https://github.com:mjafin/disambiguate
"""
repository = "git clone https://github.com/mjafin/disambiguate.git"
_get_install(repository, env, _python_make)
def install_grabix(env):
"""a wee tool for random access into BGZF files
https://github.com/arq5x/grabix
"""
version = "0.1.6"
revision = "ba792bc872d38d3cb5a69b2de00e39a6ac367d69"
try:
uptodate = versioncheck.up_to_date(env, "grabix", version, stdout_flag="version:")
# Old versions will not have any version information
except IOError:
uptodate = False
if uptodate:
return
repository = "git clone https://github.com/arq5x/grabix.git"
_get_install(repository, env, _make_copy("ls -1 grabix"),
revision=revision)
@_if_not_installed("pbgzip")
def install_pbgzip(env):
"""Parallel blocked bgzip -- compatible with bgzip but with thread support.
https://github.com/nh13/samtools/tree/master/pbgzip
"""
repository = "git clone https://github.com/chapmanb/samtools.git"
revision = "2cce3ffa97"
def _build(env):
with cd("pbgzip"):
env.safe_run("make")
install_dir = shared._get_bin_dir(env)
env.safe_sudo("cp -f pbgzip %s" % (install_dir))
_get_install(repository, env, _build, revision=revision)
@_if_not_installed("bamtools")
def install_bamtools(env):
"""command-line toolkit for working with BAM data
https://github.com/pezmaster31/bamtools
"""
version = "3fe66b9"
repository = "git clone --recursive https://github.com/pezmaster31/bamtools.git"
def _cmake_bamtools(env):
env.safe_run("mkdir build")
with cd("build"):
env.safe_run("cmake ..")
env.safe_run("make")
env.safe_sudo("cp bin/* %s" % shared._get_bin_dir(env))
env.safe_sudo("cp lib/* %s" % shared._get_lib_dir(env))
_get_install(repository, env, _cmake_bamtools,
revision=version)
@_if_not_installed("ogap")
def install_ogap(env):
"""gap opening realigner for BAM data streams
https://github.com/ekg/ogap
"""
version = "652c525"
repository = "git clone --recursive https://github.com/ekg/ogap.git"
_get_install(repository, env, _make_copy("ls ogap"),
revision=version)
def install_tophat(env):
"""TopHat is a fast splice junction mapper for RNA-Seq reads
http://ccb.jhu.edu/software/tophat/index.shtml
"""
default_version = "2.0.9"
version = env.get("tool_version", default_version)
if versioncheck.is_version(env, "tophat", version, args="--version", stdout_flag="TopHat"):
env.logger.info("tophat version {0} is up to date; not installing"
.format(version))
return
platform = "OSX" if env.distribution == "macosx" else "Linux"
url = "http://ccb.jhu.edu/software/tophat/downloads/" \
"tophat-%s.%s_x86_64.tar.gz" % (version, platform)
_get_install(url, env,
_make_copy("find . -perm -100 -type f", do_make=False))
install_tophat2 = install_tophat
# --- Assembly
@_if_not_installed("ABYSS")
def install_abyss(env):
"""Assembly By Short Sequences - a de novo, parallel, paired-end sequence assembler.
http://www.bcgsc.ca/platform/bioinfo/software/abyss
"""
    # XXX check for no sparsehash on non-ubuntu systems
default_version = "1.3.4"
version = env.get("tool_version", default_version)
url = "http://www.bcgsc.ca/downloads/abyss/abyss-%s.tar.gz" % version
def _remove_werror_get_boost(env):
env.safe_sed("configure", " -Werror", "")
# http://osdir.com/ml/abyss-users-science/2011-10/msg00108.html
url = "http://downloads.sourceforge.net/project/boost/boost/1.47.0/boost_1_47_0.tar.bz2"
dl_file = shared._remote_fetch(env, url)
env.safe_run("tar jxf %s" % dl_file)
env.safe_run("ln -s boost_1_47_0/boost boost")
_get_install(url, env, _configure_make, post_unpack_fn=_remove_werror_get_boost)
def install_transabyss(env):
"""Analyze ABySS multi-k-assembled shotgun transcriptome data.
http://www.bcgsc.ca/platform/bioinfo/software/trans-abyss
"""
version = "1.4.4"
url = "http://www.bcgsc.ca/platform/bioinfo/software/trans-abyss/" \
"releases/%s/trans-ABySS-v%s.tar.gz" % (version, version)
_get_install_local(url, env, _make_copy(do_make=False))
@_if_not_installed("velvetg")
def install_velvet(env):
"""Sequence assembler for very short reads.
http://www.ebi.ac.uk/~zerbino/velvet/
"""
default_version = "1.2.08"
version = env.get("tool_version", default_version)
url = "http://www.ebi.ac.uk/~zerbino/velvet/velvet_%s.tgz" % version
def _fix_library_order(env):
"""Fix library order problem in recent gcc versions
http://biostar.stackexchange.com/questions/13713/
error-installing-velvet-assembler-1-1-06-on-ubuntu-server
"""
env.safe_sed("Makefile", "Z_LIB_FILES=-lz", "Z_LIB_FILES=-lz -lm")
_get_install(url, env, _make_copy("find . -perm -100 -name 'velvet*'"),
post_unpack_fn=_fix_library_order)
@_if_not_installed("Ray")
def install_ray(env):
"""Ray -- Parallel genome assemblies for parallel DNA sequencing
http://denovoassembler.sourceforge.net/
"""
default_version = "2.2.0"
version = env.get("tool_version", default_version)
url = "http://downloads.sourceforge.net/project/denovoassembler/Ray-v%s.tar.bz2" % version
def _ray_do_nothing(env):
return
_get_install(url, env, _make_copy("find . -name Ray"),
post_unpack_fn=_ray_do_nothing)
def install_trinity(env):
"""Efficient and robust de novo reconstruction of transcriptomes from RNA-seq data.
http://trinityrnaseq.github.io/
"""
version = "2.0.2"
url = "https://github.com/trinityrnaseq/trinityrnaseq/archive/" \
"v%s.tar.gz" % version
dir_name = "trinityrnaseq-%s" % version
_get_install_local(url, env, _make_copy(),
dir_name=dir_name)
def install_cortex_var(env):
"""De novo genome assembly and variation analysis from sequence data.
http://cortexassembler.sourceforge.net/index_cortex_var.html
"""
version = "1.0.5.21"
url = "http://downloads.sourceforge.net/project/cortexassembler/cortex_var/" \
"latest/CORTEX_release_v{0}.tgz".format(version)
def _cortex_build(env):
env.safe_sed("Makefile", "\-L/full/path/\S*",
"-L{0}/lib -L/usr/lib -L/usr/local/lib".format(env.system_install))
env.safe_sed("Makefile", "^IDIR_GSL =.*$",
"IDIR_GSL={0}/include -I/usr/include -I/usr/local/include".format(env.system_install))
env.safe_sed("Makefile", "^IDIR_GSL_ALSO =.*$",
"IDIR_GSL_ALSO={0}/include/gsl -I/usr/include/gsl -I/usr/local/include/gsl".format(
env.system_install))
with cd("libs/gsl-1.15"):
env.safe_run("make clean")
with cd("libs/htslib"):
env.safe_run("make clean")
env.safe_run("make")
for cols in ["1", "2", "3", "4", "5"]:
for kmer in ["31", "63", "95"]:
env.safe_run("make MAXK={0} NUM_COLS={1} cortex_var".format(kmer, cols))
with cd("scripts/analyse_variants/needleman_wunsch"):
env.safe_sed("Makefile", "string_buffer.c", "string_buffer.c -lz")
# Fix incompatibilities with gzfile struct in zlib 1.2.6+
for fix_gz in ["libs/string_buffer/string_buffer.c", "libs/bioinf/bioinf.c",
"libs/string_buffer/string_buffer.h", "libs/bioinf/bioinf.h"]:
env.safe_sed(fix_gz, "gzFile \*", "gzFile ")
env.safe_sed(fix_gz, "gzFile\*", "gzFile")
env.safe_run("make")
_get_install_local(url, env, _cortex_build)
def install_bcbio_variation(env):
"""Toolkit to analyze genomic variation data with comparison and ensemble approaches.
https://github.com/chapmanb/bcbio.variation
"""
version = "0.2.6"
url = "https://github.com/chapmanb/bcbio.variation/releases/download/" \
"v%s/bcbio.variation-%s-standalone.jar" % (version, version)
install_dir = _symlinked_java_version_dir("bcbio_variation", version, env)
if install_dir:
with _make_tmp_dir() as work_dir:
with cd(work_dir):
jar_file = shared._remote_fetch(env, url)
env.safe_sudo("mv %s %s" % (jar_file, install_dir))
# --- ChIP-seq
@_if_not_installed("macs14")
def install_macs(env):
"""Model-based Analysis for ChIP-Seq.
http://liulab.dfci.harvard.edu/MACS/
"""
default_version = "1.4.2"
version = env.get("tool_version", default_version)
url = "https://github.com/downloads/taoliu/MACS/" \
"MACS-%s.tar.gz" % version
_get_install(url, env, _python_make)
# --- Structural variation
@_if_not_installed("hydra")
def install_hydra(env):
"""Hydra detects structural variation breakpoints in both unique and duplicated genomic regions.
https://code.google.com/p/hydra-sv/
"""
version = "0.5.3"
url = "http://hydra-sv.googlecode.com/files/Hydra.v{0}.tar.gz".format(version)
def clean_libs(env):
env.safe_run("make clean")
_get_install(url, env, _make_copy("ls -1 bin/* scripts/*"),
post_unpack_fn=clean_libs)
def install_freec(env):
"""Control-FREEC: a tool for detection of copy number changes and allelic imbalances.
http://bioinfo-out.curie.fr/projects/freec/
"""
version = "6.4"
if env.distribution in ["ubuntu", "debian"]:
if env.is_64bit:
url = "http://bioinfo-out.curie.fr/projects/freec/src/FREEC_Linux64.tar.gz"
else:
url = "http://bioinfo-out.curie.fr/projects/freec/src/FREEC_LINUX32.tar.gz"
if not versioncheck.up_to_date(env, "freec", version, stdout_index=1):
_get_install(url, env, _make_copy("find . -name 'freec'"), dir_name=".")
@_if_not_installed("CRISP.py")
def install_crisp(env):
"""Detect SNPs and short indels from pooled sequencing data.
https://sites.google.com/site/vibansal/software/crisp/
"""
version = "5"
url = "https://sites.google.com/site/vibansal/software/crisp/" \
"CRISP-linux-v{0}.tar.gz".format(version)
def _make_executable():
env.safe_run("chmod a+x *.py")
_get_install(url, env, _make_copy("ls -1 CRISP.py crisp_to_vcf.py",
premake_cmd=_make_executable,
do_make=False))
@_if_not_installed("run_pipeline.pl")
def install_tassel(env):
"""TASSEL: evaluate traits associations, evolutionary patterns, and linkage disequilibrium.
http://www.maizegenetics.net/index.php?option=com_content&task=view&id=89&/Itemid=119
"""
version = "5"
build_id = "1140d3fceb75"
url = "https://bitbucket.org/tasseladmin/tassel-{0}-standalone/get/{1}.zip".format(version, build_id)
executables = ["start_tassel.pl", "run_pipeline.pl"]
install_dir = _symlinked_java_version_dir("tassel", version, env)
if install_dir:
with _make_tmp_dir() as work_dir:
with cd(work_dir):
dl_file = shared._remote_fetch(env, url)
env.safe_run("unzip %s" % dl_file)
with cd("tasseladmin-tassel-{0}-standalone-{1}".format(version, build_id)):
for x in executables:
env.safe_sed(x, "^my \$top.*;",
"use FindBin qw($RealBin); my $top = $RealBin;")
env.safe_sudo("chmod a+rwx %s" % x)
env.safe_sudo("mv * %s" % install_dir)
for x in executables:
env.safe_sudo("ln -s %s/%s %s/bin/%s" % (install_dir, x,
env.system_install, x))
@_if_not_installed("ustacks")
def install_stacks(env):
"""Stacks: build loci out of a set of short-read sequenced samples.
http://creskolab.uoregon.edu/stacks/
"""
version = "0.9999"
url = "http://creskolab.uoregon.edu/stacks/source/" \
"stacks-{0}.tar.gz".format(version)
_get_install(url, env, _configure_make)
@_if_not_installed("seqlogo")
def install_weblogo(env):
"""Weblogo
http://weblogo.berkeley.edu/
"""
version = "2.8.2"
url = "http://weblogo.berkeley.edu/release/weblogo.%s.tar.gz" % version
_get_install(url, env, _make_copy("find . -perm -100 -type f", do_make=False))
def _cp_pm(env):
for perl_module in ["template.pm", "logo.pm", "template.eps"]:
env.safe_sudo("cp %s %s/lib/perl5" % (perl_module, env.system_install))
    _get_install(url, env, _cp_pm)
|
heuermh/cloudbiolinux
|
cloudbio/custom/bio_nextgen.py
|
Python
|
mit
| 33,357
|
[
"Bowtie"
] |
a7e995bfaa3aced25a091e65c9396e8417ee21fa6cc70e3d2e4520885661552f
|
# Copyright 2013 by Leighton Pritchard. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""This module provides classes to represent a KGML Pathway Map.
The KGML definition is as of release KGML v0.7.1
(http://www.kegg.jp/kegg/xml/docs/)
Classes:
- Pathway Specifies graph information for the pathway map
- Relation Specifies a relationship between two proteins or
KOs, or protein and compound. There is an implied
direction to the relationship in some cases.
- Reaction A specific chemical reaction between a substrate
and a product.
- Entry A node in the pathway graph
- Graphics Entry subelement describing its visual
representation
"""
import time
from itertools import chain
from xml.dom import minidom
import xml.etree.ElementTree as ET
from Bio._py3k import _is_int_or_long, _as_string
# Pathway
class Pathway(object):
"""Represents a KGML pathway from KEGG.
Specifies graph information for the pathway map, as described in
release KGML v0.7.1 (http://www.kegg.jp/kegg/xml/docs/)
Attributes:
name KEGGID of the pathway map
org ko/ec/[org prefix]
number map number (integer)
title the map title
image URL of the image map for the pathway
link URL of information about the pathway
entries Dictionary of entries in the pathway, keyed by node ID
reactions Set of reactions in the pathway
The name attribute has a restricted format, so we make it a property and
enforce the formatting.
The Pathway object is the only allowed route for adding/removing
Entry, Reaction, or Relation elements.
Entries are held in a dictionary and keyed by the node ID for the
pathway graph - this allows for ready access via the Reaction/Relation
etc. elements. Entries must be added before reference by any other
element.
Reactions are held in a dictionary, keyed by node ID for the path.
The elements referred to in the reaction must be added before the
reaction itself.
"""
def __init__(self):
self._name = ''
self.org = ''
self._number = None
self.title = ''
self.image = ''
self.link = ''
self.entries = {}
self._reactions = {}
self._relations = set()
def get_KGML(self):
"""Return the pathway as a string in prettified KGML format."""
header = '\n'.join(['<?xml version="1.0"?>',
'<!DOCTYPE pathway SYSTEM ' +
'"http://www.genome.jp/kegg/xml/' +
'KGML_v0.7.1_.dtd">',
'<!-- Created by KGML_Pathway.py %s -->' %
time.asctime()])
rough_xml = header + _as_string(ET.tostring(self.element, 'utf-8'))
reparsed = minidom.parseString(rough_xml)
return reparsed.toprettyxml(indent=" ")
def add_entry(self, entry):
"""Add an Entry element to the pathway."""
# We insist that the node ID is an integer
assert _is_int_or_long(entry.id), \
"Node ID must be an integer, got %s (%s)" % (type(entry.id),
entry.id)
entry._pathway = self # Let the entry know about the pathway
self.entries[entry.id] = entry
def remove_entry(self, entry):
"""Remove an Entry element from the pathway."""
assert _is_int_or_long(entry.id), \
"Node ID must be an integer, got %s (%s)" % (type(entry.id),
entry.id)
# We need to remove the entry from any other elements that may
# contain it, which means removing those elements
# TODO
del self.entries[entry.id]
def add_reaction(self, reaction):
"""Add a Reaction element to the pathway."""
# We insist that the node ID is an integer and corresponds to an entry
assert _is_int_or_long(reaction.id), \
"Node ID must be an integer, got %s (%s)" % (type(reaction.id),
reaction.id)
assert reaction.id in self.entries, \
"Reaction ID %d has no corresponding entry" % reaction.id
reaction._pathway = self # Let the reaction know about the pathway
self._reactions[reaction.id] = reaction
def remove_reaction(self, reaction):
"""Remove a Reaction element from the pathway."""
assert _is_int_or_long(reaction.id), \
"Node ID must be an integer, got %s (%s)" % (type(reaction.id),
reaction.id)
# We need to remove the reaction from any other elements that may
# contain it, which means removing those elements
# TODO
del self._reactions[reaction.id]
def add_relation(self, relation):
"""Add a Relation element to the pathway."""
relation._pathway = self # Let the reaction know about the pathway
self._relations.add(relation)
def remove_relation(self, relation):
"""Remove a Relation element from the pathway."""
self._relations.remove(relation)
def __str__(self):
"""Returns a readable summary description string."""
outstr = ['Pathway: %s' % self.title,
'KEGG ID: %s' % self.name,
'Image file: %s' % self.image,
'Organism: %s' % self.org,
'Entries: %d' % len(self.entries),
'Entry types:']
for t in ['ortholog', 'enzyme', 'reaction',
'gene', 'group', 'compound', 'map']:
etype = [e for e in self.entries.values() if e.type == t]
if len(etype):
outstr.append('\t%s: %d' % (t, len(etype)))
return '\n'.join(outstr) + '\n'
# Assert correct formatting of the pathway name, and other attributes
def _getname(self):
return self._name
def _setname(self, value):
assert value.startswith('path:'), \
"Pathway name should begin with 'path:', got %s" % value
self._name = value
def _delname(self):
del self._name
name = property(_getname, _setname, _delname,
"The KEGGID for the pathway map.")
def _getnumber(self):
return self._number
def _setnumber(self, value):
self._number = int(value)
def _delnumber(self):
del self._number
number = property(_getnumber, _setnumber, _delnumber,
"The KEGG map number.")
@property
def compounds(self):
"""Get a list of entries of type compound."""
return [e for e in self.entries.values() if e.type == 'compound']
@property
def maps(self):
"""Get a list of entries of type map."""
return [e for e in self.entries.values() if e.type == 'map']
@property
def orthologs(self):
"""Get a list of entries of type ortholog."""
return [e for e in self.entries.values() if e.type == 'ortholog']
@property
def genes(self):
"""Get a list of entries of type gene."""
return [e for e in self.entries.values() if e.type == 'gene']
@property
def reactions(self):
"""Get a list of reactions in the pathway."""
return self._reactions.values()
@property
def reaction_entries(self):
"""Get a list of entries corresponding to each reaction
in the pathway.
"""
return [self.entries[i] for i in self._reactions]
@property
def relations(self):
"""Get a list of relations in the pathway."""
return list(self._relations)
@property
def element(self):
"""Return the Pathway as a valid KGML element."""
# The root is this Pathway element
pathway = ET.Element('pathway')
pathway.attrib = {'name': self._name,
'org': self.org,
'number': str(self._number),
'title': self.title,
'image': self.image,
'link': self.link,
}
# We add the Entries in node ID order
for eid, entry in sorted(self.entries.items()):
pathway.append(entry.element)
# Next we add Relations
for relation in self._relations:
pathway.append(relation.element)
for eid, reaction in sorted(self._reactions.items()):
pathway.append(reaction.element)
return pathway
@property
def bounds(self):
"""Coordinate bounds for all Graphics elements in the Pathway.
Returns the [(xmin, ymin), (xmax, ymax)] coordinates for all
Graphics elements in the Pathway
"""
xlist, ylist = [], []
for b in [g.bounds for g in self.entries.values()]:
xlist.extend([b[0][0], b[1][0]])
ylist.extend([b[0][1], b[1][1]])
return [(min(xlist), min(ylist)),
(max(xlist), max(ylist))]
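# --- Illustrative sketch (editor's example, not part of Biopython) ---
# Demonstrates the ordering constraint documented above: Entry objects must
# be added to the Pathway before any Reaction that refers to them.  The IDs
# and KEGG identifiers below are illustrative only; the Entry and Reaction
# classes used here are defined later in this module.
def _example_minimal_pathway():
    pathway = Pathway()
    pathway.name = 'path:ko00010'
    pathway.org = 'ko'
    pathway.number = '00010'
    compound = Entry()
    compound.id = '1'
    compound.name = 'cpd:C00031'
    compound.type = 'compound'
    pathway.add_entry(compound)
    ortholog = Entry()
    ortholog.id = '2'
    ortholog.name = 'ko:K00844'
    ortholog.type = 'ortholog'
    pathway.add_entry(ortholog)
    reaction = Reaction()
    reaction.id = '2'              # must match an existing entry node ID
    reaction.name = 'rn:R01786'
    reaction.type = 'irreversible'
    pathway.add_reaction(reaction)
    reaction.add_substrate(1)      # entry 1 was added before being referenced
    return pathway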
# Entry
class Entry(object):
"""Represent an Entry from KGML.
Each Entry element is a node in the pathway graph, as described in
release KGML v0.7.1 (http://www.kegg.jp/kegg/xml/docs/)
Attributes:
- id The ID of the entry in the pathway map (integer)
- names List of KEGG IDs for the entry
- type The type of the entry
- link URL of information about the entry
- reaction List of KEGG IDs of the corresponding reactions
(integer)
- graphics List of Graphics objects describing the Entry's visual
representation
- components List of component node ID for this Entry ('group')
- alt List of alternate names for the Entry
NOTE: The alt attribute represents a subelement of the substrate and
product elements in the KGML file
"""
def __init__(self):
self._id = None
self._names = []
self.type = ''
self.image = ''
self.link = ''
self.graphics = []
self.components = set()
self.alt = []
self._pathway = None
self._reactions = []
def __str__(self):
"""Return readable descriptive string."""
outstr = ['Entry node ID: %d' % self.id,
'Names: %s' % self.name,
'Type: %s' % self.type,
'Components: %s' % self.components,
'Reactions: %s' % self.reaction,
'Graphics elements: %d %s' % (len(self.graphics),
self.graphics)]
return '\n'.join(outstr) + '\n'
def add_component(self, element):
"""Add an element to the entry.
If the Entry is already part of a pathway, make sure
the component already exists.
"""
if self._pathway is not None:
assert element.id in self._pathway.entries, \
"Component %s is not an entry in the pathway" % element.id
self.components.add(element)
def remove_component(self, value):
"""Remove the entry with the passed ID from the group."""
self.components.remove(value)
def add_graphics(self, entry):
"""Add the Graphics entry."""
self.graphics.append(entry)
def remove_graphics(self, entry):
"""Remove the Graphics entry with the passed ID from the group."""
self.graphics.remove(entry)
# Names may be given as a space-separated list of KEGG identifiers
def _getname(self):
return ' '.join(self._names)
def _setname(self, value):
self._names = value.split()
def _delname(self):
self._names = []
name = property(_getname, _setname, _delname,
"List of KEGG identifiers for the Entry.")
# Reactions may be given as a space-separated list of KEGG identifiers
def _getreaction(self):
return ' '.join(self._reactions)
def _setreaction(self, value):
self._reactions = value.split()
def _delreaction(self):
self._reactions = []
reaction = property(_getreaction, _setreaction, _delreaction,
"List of reaction KEGG IDs for this Entry.")
# We make sure that the node ID is an integer
def _getid(self):
return self._id
def _setid(self, value):
self._id = int(value)
def _delid(self):
del self._id
id = property(_getid, _setid, _delid,
"The pathway graph node ID for the Entry.")
@property
def element(self):
"""Return the Entry as a valid KGML element."""
# The root is this Entry element
entry = ET.Element('entry')
entry.attrib = {'id': str(self._id),
'name': self.name,
'link': self.link,
'type': self.type
}
if len(self._reactions):
entry.attrib['reaction'] = self.reaction
if len(self.graphics):
for g in self.graphics:
entry.append(g.element)
if len(self.components):
for c in self.components:
entry.append(c.element)
return entry
@property
def bounds(self):
"""Coordinate bounds for all Graphics elements in the Entry.
Return the [(xmin, ymin), (xmax, ymax)] co-ordinates for the Entry
Graphics elements.
"""
xlist, ylist = [], []
for b in [g.bounds for g in self.graphics]:
xlist.extend([b[0][0], b[1][0]])
ylist.extend([b[0][1], b[1][1]])
return [(min(xlist), min(ylist)),
(max(xlist), max(ylist))]
@property
def is_reactant(self):
"""Does this Entry participate in any reaction in parent pathway?
Returns True if the Entry participates in any reaction of its
parent Pathway
"""
for rxn in self._pathway.reactions:
if self._id in rxn.reactant_ids:
return True
return False
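# --- Illustrative sketch (editor's example, not part of Biopython) ---
# Entry names are stored internally as a list but set and read as a single
# space-separated string of KEGG identifiers.
def _example_entry_names():
    entry = Entry()
    entry.id = '10'
    entry.type = 'gene'
    entry.name = 'hsa:226 hsa:229'   # two gene identifiers set at once
    return entry.name                # 'hsa:226 hsa:229'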
# Component
class Component(object):
"""An Entry subelement used to represents a complex node.
A subelement of the Entry element, used when the Entry is a complex
node, as described in release KGML v0.7.1
(http://www.kegg.jp/kegg/xml/docs/)
The Component acts as a collection (with type 'group', and typically
its own Graphics subelement), having only an ID.
"""
def __init__(self, parent):
self._id = None
self._parent = parent
# We make sure that the node ID is an integer
def _getid(self):
return self._id
def _setid(self, value):
self._id = int(value)
def _delid(self):
del self._id
id = property(_getid, _setid, _delid,
"The pathway graph node ID for the Entry")
@property
def element(self):
"""Return the Component as a valid KGML element."""
# The root is this Component element
component = ET.Element('component')
component.attrib = {'id': str(self._id)}
return component
# Graphics
class Graphics(object):
"""An Entry subelement used to represents the visual representation.
A subelement of Entry, specifying its visual representation, as
described in release KGML v0.7.1 (http://www.kegg.jp/kegg/xml/docs/)
Attributes:
name Label for the graphics object
x X-axis position of the object (int)
y Y-axis position of the object (int)
coords polyline co-ordinates, list of (int, int) tuples
type object shape
width object width (int)
height object height (int)
fgcolor object foreground color (hex RGB)
bgcolor object background color (hex RGB)
Some attributes are present only for specific graphics types. For
example, line types do not (typically) have a width.
We permit non-DTD attributes and attribute settings, such as
dash List of ints, describing an on/off pattern for dashes
"""
def __init__(self, parent):
self.name = ''
self._x = None
self._y = None
self._coords = None
self.type = ''
self._width = None
self._height = None
self.fgcolor = ''
self.bgcolor = ''
self._parent = parent
# We make sure that the XY coordinates, width and height are numbers
def _getx(self):
return self._x
def _setx(self, value):
self._x = float(value)
def _delx(self):
del self._x
x = property(_getx, _setx, _delx,
"The X coordinate for the graphics element.")
def _gety(self):
return self._y
def _sety(self, value):
self._y = float(value)
def _dely(self):
del self._y
y = property(_gety, _sety, _dely,
"The Y coordinate for the graphics element.")
def _getwidth(self):
return self._width
def _setwidth(self, value):
self._width = float(value)
def _delwidth(self):
del self._width
width = property(_getwidth, _setwidth, _delwidth,
"The width of the graphics element.")
def _getheight(self):
return self._height
def _setheight(self, value):
self._height = float(value)
def _delheight(self):
del self._height
height = property(_getheight, _setheight, _delheight,
"The height of the graphics element.")
# We make sure that the polyline co-ordinates are integers, too
def _getcoords(self):
return self._coords
def _setcoords(self, value):
clist = [int(e) for e in value.split(',')]
self._coords = [tuple(clist[i:i + 2]) for i in range(0, len(clist), 2)]
def _delcoords(self):
del self._coords
coords = property(_getcoords, _setcoords, _delcoords,
"Polyline coordinates for the graphics element.")
# Set default colors
def _getfgcolor(self):
return self._fgcolor
def _setfgcolor(self, value):
if value == 'none':
self._fgcolor = '#000000' # this default defined in KGML spec
else:
self._fgcolor = value
def _delfgcolor(self):
del self._fgcolor
fgcolor = property(_getfgcolor, _setfgcolor, _delfgcolor,
"Foreground color.")
def _getbgcolor(self):
return self._bgcolor
def _setbgcolor(self, value):
if value == 'none':
self._bgcolor = '#000000' # this default defined in KGML spec
else:
self._bgcolor = value
def _delbgcolor(self):
del self._bgcolor
bgcolor = property(_getbgcolor, _setbgcolor, _delbgcolor,
"Background color.")
@property
def element(self):
"""Return the Graphics as a valid KGML element."""
# The root is this Component element
graphics = ET.Element('graphics')
if isinstance(self.fgcolor, str): # Assumes that string is hexstring
fghex = self.fgcolor
else: # Assumes ReportLab Color object
fghex = '#' + self.fgcolor.hexval()[2:]
if isinstance(self.bgcolor, str): # Assumes that string is hexstring
bghex = self.bgcolor
else: # Assumes ReportLab Color object
bghex = '#' + self.bgcolor.hexval()[2:]
graphics.attrib = {'name': self.name,
'type': self.type,
'fgcolor': fghex,
'bgcolor': bghex}
for (n, attr) in [('x', '_x'), ('y', '_y'),
('width', '_width'), ('height', '_height')]:
if getattr(self, attr) is not None:
graphics.attrib[n] = str(getattr(self, attr))
if self.type == 'line': # Need to write polycoords
graphics.attrib['coords'] = \
','.join([str(e) for e in chain.from_iterable(self.coords)])
return graphics
@property
def bounds(self):
"""Coordinate bounds for the Graphics element.
Return the bounds of the Graphics object as an [(xmin, ymin),
(xmax, ymax)] tuple. Co-ordinates give the centre of the
circle, rectangle, roundrectangle elements, so we have to
adjust for the relevant width/height.
"""
if self.type == 'line':
xlist = [x for x, y in self.coords]
ylist = [y for x, y in self.coords]
return [(min(xlist), min(ylist)),
(max(xlist), max(ylist))]
else:
return [(self.x - self.width * 0.5, self.y - self.height * 0.5),
(self.x + self.width * 0.5, self.y + self.height * 0.5)]
@property
def centre(self):
"""Return the centre of the Graphics object as an (x, y) tuple."""
return (0.5 * (self.bounds[0][0] + self.bounds[1][0]),
0.5 * (self.bounds[0][1] + self.bounds[1][1]))
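# --- Illustrative sketch (editor's example, not part of Biopython) ---
# A rectangle Graphics element centred at (100, 200); `bounds` shifts the
# centre by half the width and height, as described above.
def _example_graphics_bounds():
    graphics = Graphics(parent=None)
    graphics.name = 'example'
    graphics.type = 'rectangle'
    graphics.x, graphics.y = '100', '200'
    graphics.width, graphics.height = '46', '17'
    graphics.fgcolor, graphics.bgcolor = '#000000', '#BFFFBF'
    return graphics.bounds   # [(77.0, 191.5), (123.0, 208.5)]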
# Reaction
class Reaction(object):
"""A specific chemical reaction with substrates and products.
This describes a specific chemical reaction between one or more
substrates and one or more products.
Attributes:
id Pathway graph node ID of the entry
names List of KEGG identifier(s) from the REACTION database
type String: reversible or irreversible
substrate Entry object of the substrate
product Entry object of the product
"""
def __init__(self):
self._id = None
self._names = []
self.type = ''
self._substrates = set()
self._products = set()
self._pathway = None
def __str__(self):
"""Return an informative human-readable string."""
outstr = ['Reaction node ID: %s' % self.id,
'Reaction KEGG IDs: %s' % self.name,
'Type: %s' % self.type,
'Substrates: %s' %
','.join([s.name for s in self.substrates]),
'Products: %s' %
','.join([s.name for s in self.products]),
]
return '\n'.join(outstr) + '\n'
def add_substrate(self, substrate_id):
"""Add a substrate, identified by its node ID, to the reaction."""
if self._pathway is not None:
assert int(substrate_id) in self._pathway.entries, \
"Couldn't add substrate, no node ID %d in Pathway" % \
int(substrate_id)
        self._substrates.add(int(substrate_id))
def add_product(self, product_id):
"""Add a product, identified by its node ID, to the reaction."""
if self._pathway is not None:
assert int(product_id) in self._pathway.entries, \
"Couldn't add product, no node ID %d in Pathway" % product_id
self._products.add(int(product_id))
# The node ID is also the node ID of the Entry that corresponds to the
# reaction; we get the corresponding Entry when there is an associated
# Pathway
def _getid(self):
return self._id
def _setid(self, value):
self._id = int(value)
def _delid(self):
del self._id
id = property(_getid, _setid, _delid,
"Node ID for the reaction.")
# Names may show up as a space-separated list of several KEGG identifiers
def _getnames(self):
return ' '.join(self._names)
def _setnames(self, value):
self._names.extend(value.split())
def _delnames(self):
        del self._names
name = property(_getnames, _setnames, _delnames,
"List of KEGG identifiers for the reaction.")
# products and substrates are read-only properties, returning lists
# of Entry objects
@property
def substrates(self):
"""Return list of substrate Entry elements."""
return [self._pathway.entries[sid] for sid in self._substrates]
@property
def products(self):
"""Return list of product Entry elements."""
return [self._pathway.entries[pid] for pid in self._products]
@property
def entry(self):
"""Return the Entry corresponding to this reaction."""
return self._pathway.entries[self._id]
@property
def reactant_ids(self):
"""Return a list of substrate and product reactant IDs."""
return self._products.union(self._substrates)
@property
def element(self):
"""Return KGML element describing the Reaction."""
# The root is this Relation element
reaction = ET.Element('reaction')
reaction.attrib = {'id': str(self.id),
'name': self.name,
'type': self.type}
for s in self._substrates:
substrate = ET.Element('substrate')
substrate.attrib['id'] = str(s)
substrate.attrib['name'] = self._pathway.entries[s].name
reaction.append(substrate)
for p in self._products:
product = ET.Element('product')
product.attrib['id'] = str(p)
product.attrib['name'] = self._pathway.entries[p].name
reaction.append(product)
return reaction
# Relation
class Relation(object):
"""A relationship between to products, KOs, or protein and compound.
This describes a relationship between two products, KOs, or protein
and compound, as described in release KGML v0.7.1
(http://www.kegg.jp/kegg/xml/docs/)
Attributes:
- entry1 The first Entry object node ID defining the
relation (int)
- entry2 The second Entry object node ID defining the
relation (int)
- type The relation type
- subtypes List of subtypes for the relation, as a list of
(name, value) tuples
"""
def __init__(self):
self._entry1 = None
self._entry2 = None
self.type = ''
self.subtypes = []
self._pathway = None
def __str__(self):
"""A useful human-readable string."""
outstr = ['Relation (subtypes: %d):' % len(self.subtypes),
'Entry1:', str(self.entry1),
'Entry2:', str(self.entry2)]
for s in self.subtypes:
outstr.extend(['Subtype: %s' % s[0], str(s[1])])
return '\n'.join(outstr)
# Properties entry1 and entry2
def _getentry1(self):
if self._pathway is not None:
return self._pathway.entries[self._entry1]
return self._entry1
def _setentry1(self, value):
self._entry1 = int(value)
def _delentry1(self):
del self._entry1
entry1 = property(_getentry1, _setentry1, _delentry1,
"Entry1 of the relation.")
def _getentry2(self):
if self._pathway is not None:
return self._pathway.entries[self._entry2]
return self._entry2
def _setentry2(self, value):
self._entry2 = int(value)
def _delentry2(self):
del self._entry2
entry2 = property(_getentry2, _setentry2, _delentry2,
"Entry2 of the relation.")
@property
def element(self):
"""Return KGML element describing the Relation."""
# The root is this Relation element
relation = ET.Element('relation')
relation.attrib = {'entry1': str(self._entry1),
'entry2': str(self._entry2),
'type': self.type}
for (name, value) in self.subtypes:
subtype = ET.Element('subtype')
subtype.attrib[name] = str(value)
relation.append(subtype)
return relation
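
# Illustrative sketch (not part of the original module): a minimal, standalone way to
# build a Relation and serialise it with this module's ET alias (xml.etree.ElementTree).
# The node IDs and subtype below are made up; with no parent Pathway attached,
# entry1/entry2 simply return the raw integer IDs.
if __name__ == "__main__":
    demo_relation = Relation()
    demo_relation.entry1 = 10                      # hypothetical node ID
    demo_relation.entry2 = 12                      # hypothetical node ID
    demo_relation.type = "ECrel"
    demo_relation.subtypes.append(("compound", 15))
    print(ET.tostring(demo_relation.element))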
|
zjuchenyuan/BioWeb
|
Lib/Bio/KEGG/KGML/KGML_pathway.py
|
Python
|
mit
| 28,379
|
[
"Biopython"
] |
eb9062c37013763be7dd7db7a34551d338a8433e8c26002c7514b7a87abc6e6b
|
""" BaseHistoryCorrector is a base class for correctors of user shares within
    a given group, based on the history of resource consumption
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import time as nativetime
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getDNForUsername
from DIRAC.WorkloadManagementSystem.private.correctors.BaseCorrector import BaseCorrector
class BaseHistoryCorrector(BaseCorrector):
_GLOBAL_MAX_CORRECTION = 'MaxGlobalCorrection'
_SLICE_TIME_SPAN = 'TimeSpan'
_SLICE_WEIGHT = 'Weight'
_SLICE_MAX_CORRECTION = 'MaxCorrection'
def initialize(self):
self.log = gLogger.getSubLogger("HistoryCorrector")
self.__usageHistory = {}
self.__slices = {}
self.__lastHistoryUpdate = 0
self.__globalCorrectionFactor = 5
self._fillSlices()
return S_OK()
def _fillSlices(self):
self.log.info("Filling time slices...")
self.__slices = {}
self.__globalCorrectionFactor = self.getCSOption(self._GLOBAL_MAX_CORRECTION, 5)
result = self.getCSSections()
if not result['OK']:
self.log.error("Cound not get configured time slices", result['Message'])
return
timeSlices = result['Value']
for timeSlice in timeSlices:
self.__slices[timeSlice] = {}
for key, defaultValue in ((self._SLICE_TIME_SPAN, 604800),
(self._SLICE_WEIGHT, 1),
(self._SLICE_MAX_CORRECTION, 3)):
self.__slices[timeSlice][key] = self.getCSOption("%s/%s" % (timeSlice, key), defaultValue)
# Weight has to be normalized to sum 1
weightSum = 0
for timeSlice in self.__slices:
weightSum += self.__slices[timeSlice][self._SLICE_WEIGHT]
for timeSlice in self.__slices:
self.__slices[timeSlice][self._SLICE_WEIGHT] /= float(weightSum)
self.log.info("Found %s time slices" % len(self.__slices))
def updateHistoryKnowledge(self):
updatePeriod = self.getCSOption('UpdateHistoryPeriod', 900)
now = nativetime.time()
if self.__lastHistoryUpdate + updatePeriod > now:
self.log.verbose("Skipping history update. Last update was less than %s secs ago" % updatePeriod)
return
self.__lastHistoryUpdate = now
self.log.info("Updating history knowledge")
self.__usageHistory = {}
for timeSlice in self.__slices:
result = self._getUsageHistoryForTimeSpan(self.__slices[timeSlice][self._SLICE_TIME_SPAN],
self.getGroup())
if not result['OK']:
self.__usageHistory = {}
self.log.warn("Could not get history for slice", "%s: %s" % (timeSlice, result['Message']))
return
self.__usageHistory[timeSlice] = result['Value']
self.log.verbose("Got history for slice %s (%s entities in slice)" %
(timeSlice, len(self.__usageHistory[timeSlice])))
self.log.info("Updated history knowledge")
def _getHistoryData(self, _timeSpan, _groupToUse):
""" Get history data from an external source to be defined in a derived class
:param int timeSpan: time span
:param str groupToUse: requested user group
:return: dictionary with history data
"""
return S_ERROR('Not implemented !')
def _getUsageHistoryForTimeSpan(self, timeSpan, groupToUse=""):
result = self._getHistoryData(timeSpan, groupToUse)
if not result['OK']:
self.log.error("Cannot get history data", result['Message'])
return result
data = result['Value'].get('data', [])
if not data:
message = "Empty history data"
self.log.warn(message)
return S_ERROR(message)
# Map the usernames to DNs
if groupToUse:
mappedData = {}
for userName in data:
result = getDNForUsername(userName)
if not result['OK']:
self.log.error("User does not have any DN assigned", "%s :%s" % (userName, result['Message']))
continue
for userDN in result['Value']:
mappedData[userDN] = data[userName]
data = mappedData
return S_OK(data)
def __normalizeShares(self, entityShares):
totalShare = 0.0
normalizedShares = {}
# Normalize shares
for entity in entityShares:
totalShare += entityShares[entity]
self.log.verbose("Total share for given entities is %.3f" % totalShare)
for entity in entityShares:
normalizedShare = entityShares[entity] / totalShare
normalizedShares[entity] = normalizedShare
self.log.verbose("Normalized share for %s: %.3f" % (entity, normalizedShare))
return normalizedShares
def applyCorrection(self, entitiesExpectedShare):
# Normalize expected shares
normalizedShares = self.__normalizeShares(entitiesExpectedShare)
if not self.__usageHistory:
self.log.verbose("No history knowledge available. Correction is 1 for all entities")
return entitiesExpectedShare
entitiesSliceCorrections = dict([(entity, []) for entity in entitiesExpectedShare])
for timeSlice in self.__usageHistory:
self.log.verbose("Calculating correction for time slice %s" % timeSlice)
sliceTotal = 0.0
sliceHistory = self.__usageHistory[timeSlice]
for entity in entitiesExpectedShare:
if entity in sliceHistory:
sliceTotal += sliceHistory[entity]
self.log.verbose("Usage for %s: %.3f" % (entity, sliceHistory[entity]))
self.log.verbose("Total usage for slice %.3f" % sliceTotal)
if sliceTotal == 0.0:
self.log.verbose("Slice usage is 0, skeeping slice")
continue
maxSliceCorrection = self.__slices[timeSlice][self._SLICE_MAX_CORRECTION]
minSliceCorrection = 1.0 / maxSliceCorrection
for entity in entitiesExpectedShare:
if entity in sliceHistory:
normalizedSliceUsage = sliceHistory[entity] / sliceTotal
self.log.verbose("Entity %s is present in slice %s (normalized usage %.2f)" % (entity,
timeSlice,
normalizedSliceUsage))
sliceCorrectionFactor = normalizedShares[entity] / normalizedSliceUsage
sliceCorrectionFactor = min(sliceCorrectionFactor, maxSliceCorrection)
sliceCorrectionFactor = max(sliceCorrectionFactor, minSliceCorrection)
sliceCorrectionFactor *= self.__slices[timeSlice][self._SLICE_WEIGHT]
else:
self.log.verbose("Entity %s is not present in slice %s" % (entity, timeSlice))
sliceCorrectionFactor = maxSliceCorrection
self.log.verbose("Slice correction factor for entity %s is %.3f" % (entity, sliceCorrectionFactor))
entitiesSliceCorrections[entity].append(sliceCorrectionFactor)
correctedEntityShare = {}
maxGlobalCorrectionFactor = self.__globalCorrectionFactor
minGlobalCorrectionFactor = 1.0 / maxGlobalCorrectionFactor
for entity in entitiesSliceCorrections:
entityCorrectionFactor = 0.0
slicesCorrections = entitiesSliceCorrections[entity]
if not slicesCorrections:
self.log.verbose("Entity does not have any correction %s" % entity)
correctedEntityShare[entity] = entitiesExpectedShare[entity]
else:
for cF in entitiesSliceCorrections[entity]:
entityCorrectionFactor += cF
entityCorrectionFactor = min(entityCorrectionFactor, maxGlobalCorrectionFactor)
entityCorrectionFactor = max(entityCorrectionFactor, minGlobalCorrectionFactor)
correctedShare = entitiesExpectedShare[entity] * entityCorrectionFactor
correctedEntityShare[entity] = correctedShare
self.log.verbose(
"Final correction factor for entity %s is %.3f\n Final share is %.3f" %
(entity, entityCorrectionFactor, correctedShare))
self.log.verbose("Initial shares:\n %s" % "\n ".join(["%s : %.2f" % (en, entitiesExpectedShare[en])
for en in entitiesExpectedShare]))
self.log.verbose("Corrected shares:\n %s" % "\n ".join(["%s : %.2f" % (en, correctedEntityShare[en])
for en in correctedEntityShare]))
return correctedEntityShare
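

# Illustrative sketch (not part of DIRAC): the per-slice correction arithmetic used in
# applyCorrection(), reduced to plain numbers so it can be read in isolation. The
# share/usage figures and the correction limit below are made up for the example.
def _exampleSliceCorrection(expectedShare, observedUsage, maxCorrection=3.0, weight=1.0):
  """Return the weighted, clipped correction factor for a single time slice."""
  factor = expectedShare / observedUsage
  factor = min(factor, maxCorrection)
  factor = max(factor, 1.0 / maxCorrection)
  return factor * weight


if __name__ == "__main__":
  # An entity expected to receive 50% of the resources but observed using only 10%
  # of the slice is boosted, capped at the slice's MaxCorrection of 3.
  print(_exampleSliceCorrection(0.5, 0.1))  # -> 3.0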
|
yujikato/DIRAC
|
src/DIRAC/WorkloadManagementSystem/private/correctors/BaseHistoryCorrector.py
|
Python
|
gpl-3.0
| 8,418
|
[
"DIRAC"
] |
01fe56ddf71f76d00ab42cd14eb6a09738690057c339ff699d032e5abd018a24
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 17 14:10:58 2017
@author: tkc
"""
import os
import pandas as pd
from tkinter import filedialog
AESQUANTPARAMFILE='C:\\Users\\tkc\\Documents\\Python_Scripts\\Augerquant\\Params\\AESquantparams.csv'
class AESspectrum():
    ''' Single AES spectrum file instance, created from a row of spelist (child of AESdataset);
    the file is loaded via the AESdataset parent (pd dataframe row)
    #TODO add direct file load? '''
def __init__(self, AESdataset, rowindex):
# can be opened with AESdataset parent and associated row from
# open files from directory arg
self.AESdataset=AESdataset
self.path=self.AESdataset.path # same path as AESdataset parent
# load params from spelist only (not AESlog which has images)
row=AESdataset.spelist.iloc[rowindex]
self.filename=row.Filename
self.sample=str(row.Sample)
self.numareas=int(row.Areas)
self.evbreaks=row.Evbreaks # TODO data type?
self.spectype = row.Type.lower() # multiplex or survey
self.AESdf = None # entire AES dataframe (all areas)
self.energy = None # same for all cols
self.open_csvfile()
self.aesquantparams = None
self.loadAESquantparams()
# load peaks, shifts, ampls, widths
self.smdifpeakinfo=None # dataframe w/ smdiff peak info
self.get_peaks() # get quant info from smdifpeakslog
self.integpeakinfo=None # dataframe w/ smdiff peak info
self.get_integ_peaks() # get quant info from smdifpeakslog
self.elems_integ = None #
print('Auger file', self.filename, 'loaded.')
def open_csvfile(self):
''' Read Auger spectral file '''
self.AESdf=pd.read_csv(self.filename.replace('.spe','.csv'))
self.colset=self.AESdf.columns # Counts1, Counts2, S7D71, S7D72, etc.
self.energy=self.AESdf['Energy']
print('AESfile ', self.filename,' loaded.')
def loadAESquantparams(self):
''' Loads standard values of Auger quant parameters
TODO what about dealing with local shifts '''
# Checkbutton option for local (or standard) AESquantparams in file loader?
print('AESquantparams loaded')
self.aesquantparams=pd.read_csv(AESQUANTPARAMFILE, encoding='utf-8')
def get_peaks(self):
        ''' Finds element quant already performed from smdifpeakslog (within AESdataset).
        Needed for plots: negpeak, pospeak (both calculated indirectly from shift and peakwidth)
        and negint, posint (also usually indirect); ideal positions for peaks are already loaded
        in AESdataset.
        smdifpeakinfo contains: 0) peak, 1) negpeak energy, 2) pospeak energy, 3) negint,
        4) posint, 5) ampl, 6) adjampl -- the important values for graphical display
        of quant results.
        Stores the dataframe filtered to this filename in self.smdifpeakinfo.
        '''
mycols=['Areanumber', 'Peakenergy', 'Peakindex', 'PeakID', 'Shift',
'Negintensity', 'Posintensity', 'Pospeak', 'Amplitude', 'Peakwidth','Adjamp']
self.smdifpeakinfo=self.AESdataset.Smdifpeakslog[ (self.AESdataset.Smdifpeakslog['Filename']==self.filename)]
self.smdifpeakinfo=self.smdifpeakinfo[mycols]
def get_integ_peaks(self):
''' Pull existing quant results from integ log file (if present) '''
mycols=['Areanumber', 'Element', 'Integcounts', 'Backcounts',
'Significance', 'Adjcnts','Erradjcnts']
self.integpeakinfo=self.AESdataset.Integquantlog[ (self.AESdataset.Integquantlog['Filename']==self.filename)]
self.integpeakinfo=self.integpeakinfo[mycols]
    def savecsv(self):
''' Save any changes to underlying csv file '''
class AESdataset():
''' loads all dataframes with Auger parameters from current project folder '''
def __init__(self, *args, **kwargs):
self.path = filedialog.askdirectory()
# open files
self.AESlog=None
self.spelist=None
self.Smdifpeakslog=None
self.Integquantlog=None
self.Backfitlog=None
self.open_main_files() # loads above
# self.filelist=np.ndarray.tolist(self.AESlog.Filenumber.unique())
self.numfiles=len(self.AESlog)
print(str(self.numfiles),' loaded from AESdataset.')
self.peaks=None
self.peakdata=None
self.get_peakinfo() # load needed Auger peak params (Peaks and Peakdata)
def get_peakinfo(self):
        ''' Takes element strings and energies of background regions and returns, for each element
        symbol, a tuple containing all params necessary to find that Auger peak in a given spe file;
        also returns a 2-tuple with the energy value and index of the chosen background regions.
        '''
# elemental lines (incl Fe2, Fe1, etc.)
self.peaks=self.Smdifpeakslog.PeakID.unique()
self.peakdata=[]
for peak in self.peaks:
try:
# find row in AESquantparams for this element
thispeakdata=self.AESquantparams[(self.AESquantparams['element']==peak)]
thispeakdata=thispeakdata.squeeze() # series with this elements params
                # return list of length numelements with a 5-tuple for each, containing:
                # 1) peak symbol, 2) ideal negpeak (eV), 3) ideal pospeak (eV),
                # 4) sensitivity kfactor, and 5) error in kfactor
peaktuple=(peak, thispeakdata.negpeak, thispeakdata.pospeak,
thispeakdata.kfactor, thispeakdata.errkf1) # add tuple with info for this element
self.peakdata.append(peaktuple)
except:
print('AESquantparams not found for ', peak)
print('Found', len(self.peakdata), 'quant peaks in smdifpeakslog' )
def open_main_files(self):
''' Auto loads Auger param files from working directory including
AESparalog- assorted params associated w/ each SEM-AES or TEM-AES emsa file
Backfitparamslog - ranges and parameters for AES background fits
Integquantlog - subtracted and corrected counts for chosen elements
Peakfitlog - params of gaussian fits to each element (xc, width, peakarea, Y0, rsquared)'''
if os.path.exists('Augerparamlog.csv'):
self.AESlog=pd.read_csv('Augerparamlog.csv', encoding='cp437')
self.spelist=self.AESlog[pd.notnull(self.AESlog['Areas'])]
else:
self.AESlog=pd.DataFrame()
self.spelist=pd.DataFrame()
if os.path.exists('Smdifpeakslog.csv'):
self.Smdifpeakslog=pd.read_csv('Smdifpeakslog.csv', encoding='cp437')
else:
self.Smdifpeakslog=pd.DataFrame()
if os.path.exists('Backfitlog.csv'):
self.Backfitlog=pd.read_csv('Backfitlog.csv', encoding='cp437')
else:
self.Backfitlog=pd.DataFrame()
if os.path.exists('Integquantlog.csv'):
self.Integquantlog=pd.read_csv('Integquantlog.csv', encoding='cp437')
else:
self.Integquantlog=pd.DataFrame()
# Print TEM or SEM to console based on beam kV
try:
self.AESquantparams=pd.read_csv('C:\\Users\\tkc\\Documents\\Python_Scripts\\Augerquant\\Params\\AESquantparams.csv', encoding='utf-8')
except:
self.AESquantparams=pd.DataFrame()
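
# Illustrative usage sketch (not part of the original module): loading a project folder
# and wrapping its first spectrum. This assumes the directory chosen in the file dialog
# contains Augerparamlog.csv and the related log CSVs, and that AESquantparams.csv is
# available at the hard-coded path above.
if __name__ == '__main__':
    aes_data = AESdataset()                    # prompts for the project directory
    first_spectrum = AESspectrum(aes_data, 0)
    print(first_spectrum.smdifpeakinfo.head())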
|
tkcroat/Augerquant
|
Modules/AES_data_classes.py
|
Python
|
mit
| 7,411
|
[
"Gaussian"
] |
ddd450040237c91bb21b20449ce6e3880f4910fc5ecb7b88cd77de89e657fc11
|
# -*- coding: utf-8 -*-
#
# Copyright © 2009-2010 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""Utilities and wrappers around inspect module"""
import inspect
import re
# Local imports:
from SMlib.utils import encoding
SYMBOLS = r"[^\'\"a-zA-Z0-9_.]"
def getobj(txt, last=False):
"""Return the last valid object name in string"""
txt_end = ""
for startchar, endchar in ["[]", "()"]:
if txt.endswith(endchar):
pos = txt.rfind(startchar)
if pos:
txt_end = txt[pos:]
txt = txt[:pos]
tokens = re.split(SYMBOLS, txt)
token = None
try:
while token is None or re.match(SYMBOLS, token):
token = tokens.pop()
if token.endswith('.'):
token = token[:-1]
if token.startswith('.'):
# Invalid object name
return None
if last:
#XXX: remove this statement as well as the "last" argument
token += txt[ txt.rfind(token) + len(token) ]
token += txt_end
if token:
return token
except IndexError:
return None
def getobjdir(obj):
"""
For standard objects, will simply return dir(obj)
In special cases (e.g. WrapITK package), will return only string elements
of result returned by dir(obj)
"""
return [item for item in dir(obj) if isinstance(item, basestring)]
def getdoc(obj):
"""
Return text documentation from an object. This comes in a form of
dictionary with four keys:
name:
The name of the inspected object
argspec:
        Its argspec
note:
A phrase describing the type of object (function or method) we are
inspecting, and the module it belongs to.
docstring:
        Its docstring
"""
docstring = inspect.getdoc(obj) or inspect.getcomments(obj) or ''
# Most of the time doc will only contain ascii characters, but there are
# some docstrings that contain non-ascii characters. Not all source files
# declare their encoding in the first line, so querying for that might not
# yield anything, either. So assume the most commonly used
# multi-byte file encoding (which also covers ascii).
try:
docstring = unicode(docstring, 'utf-8')
except:
pass
# Doc dict keys
doc = {'name': '',
'argspec': '',
'note': '',
'docstring': docstring}
if callable(obj):
try:
name = obj.__name__
except AttributeError:
doc['docstring'] = docstring
return doc
if inspect.ismethod(obj):
imclass = obj.im_class
if obj.im_self is not None:
doc['note'] = 'Method of %s instance' \
% obj.im_self.__class__.__name__
else:
doc['note'] = 'Unbound %s method' % imclass.__name__
obj = obj.im_func
elif hasattr(obj, '__module__'):
doc['note'] = 'Function of %s module' % obj.__module__
else:
doc['note'] = 'Function'
doc['name'] = obj.__name__
if inspect.isfunction(obj):
args, varargs, varkw, defaults = inspect.getargspec(obj)
doc['argspec'] = inspect.formatargspec(args, varargs, varkw,
defaults,
formatvalue=lambda o:'='+repr(o))
if name == '<lambda>':
doc['name'] = name + ' lambda '
doc['argspec'] = doc['argspec'][1:-1] # remove parentheses
else:
# Try to extract the argspec from the first docstring line
docstring_lines = doc['docstring'].split("\n")
first_line = docstring_lines[0].strip()
argspec = getsignaturesfromtext(first_line, '')
if argspec:
doc['argspec'] = argspec[0]
# Many scipy and numpy docstrings begin with a function
                # signature on the first line. This ends up being redundant
# when we are using title and argspec to create the
# rich text "Definition:" field. We'll carefully remove this
# redundancy but only under a strict set of conditions:
                # Remove the starting characters of the 'doc' portion *iff*
# the non-whitespace characters on the first line
# match *exactly* the combined function title
# and argspec we determined above.
name_and_argspec = doc['name'] + doc['argspec']
if first_line == name_and_argspec:
doc['docstring'] = doc['docstring'].replace(
name_and_argspec, '', 1).lstrip()
else:
doc['argspec'] = '(...)'
# Remove self from argspec
argspec = doc['argspec']
doc['argspec'] = argspec.replace('(self)', '()').replace('(self, ', '(')
return doc
def getsource(obj):
"""Wrapper around inspect.getsource"""
try:
try:
src = encoding.to_unicode( inspect.getsource(obj) )
except TypeError:
if hasattr(obj, '__class__'):
src = encoding.to_unicode( inspect.getsource(obj.__class__) )
else:
# Bindings like VTK or ITK require this case
src = getdoc(obj)
return src
except (TypeError, IOError):
return
def getsignaturesfromtext(text, objname):
"""Get object signatures from text (object documentation)
Return a list containing a single string in most cases
Example of multiple signatures: PyQt4 objects"""
#FIXME: the following regexp is not working with this example of docstring:
# QObject.connect(QObject, SIGNAL(), QObject, SLOT(), Qt.ConnectionType=Qt.AutoConnection) -> bool QObject.connect(QObject, SIGNAL(), callable, Qt.ConnectionType=Qt.AutoConnection) -> bool QObject.connect(QObject, SIGNAL(), SLOT(), Qt.ConnectionType=Qt.AutoConnection) -> bool
if isinstance(text, dict):
text = text.get('docstring', '')
return re.findall(objname+r'\([^\)]+\)', text)
def getargsfromtext(text, objname):
"""Get arguments from text (object documentation)"""
signatures = getsignaturesfromtext(text, objname)
if signatures:
signature = signatures[0]
argtxt = signature[signature.find('(')+1:-1]
return argtxt.split(',')
def getargsfromdoc(obj):
"""Get arguments from object doc"""
if obj.__doc__ is not None:
return getargsfromtext(obj.__doc__, obj.__name__)
def getargs(obj):
"""Get the names and default values of a function's arguments"""
if inspect.isfunction(obj) or inspect.isbuiltin(obj):
func_obj = obj
elif inspect.ismethod(obj):
func_obj = obj.im_func
elif inspect.isclass(obj) and hasattr(obj, '__init__'):
func_obj = getattr(obj, '__init__')
else:
return []
if not hasattr(func_obj, 'func_code'):
# Builtin: try to extract info from doc
args = getargsfromdoc(func_obj)
if args is not None:
return args
else:
# Example: PyQt4
return getargsfromdoc(obj)
args, _, _ = inspect.getargs(func_obj.func_code)
if not args:
return getargsfromdoc(obj)
# Supporting tuple arguments in def statement:
for i_arg, arg in enumerate(args):
if isinstance(arg, list):
args[i_arg] = "(%s)" % ", ".join(arg)
defaults = func_obj.func_defaults
if defaults is not None:
for index, default in enumerate(defaults):
args[index+len(args)-len(defaults)] += '='+repr(default)
if inspect.isclass(obj) or inspect.ismethod(obj):
if len(args) == 1:
return None
if 'self' in args:
args.remove('self')
return args
def getargtxt(obj, one_arg_per_line=True):
"""
Get the names and default values of a function's arguments
Return list with separators (', ') formatted for calltips
"""
args = getargs(obj)
if args:
sep = ', '
textlist = None
for i_arg, arg in enumerate(args):
if textlist is None:
textlist = ['']
textlist[-1] += arg
if i_arg < len(args)-1:
textlist[-1] += sep
if len(textlist[-1]) >= 32 or one_arg_per_line:
textlist.append('')
if inspect.isclass(obj) or inspect.ismethod(obj):
if len(textlist) == 1:
return None
if 'self'+sep in textlist:
textlist.remove('self'+sep)
return textlist
def isdefined(obj, force_import=False, namespace=None):
"""Return True if object is defined in namespace
If namespace is None --> namespace = locals()"""
if namespace is None:
namespace = locals()
attr_list = obj.split('.')
base = attr_list.pop(0)
if len(base) == 0:
return False
import __builtin__
if base not in __builtin__.__dict__ and base not in namespace:
if force_import:
try:
module = __import__(base, globals(), namespace)
if base not in globals():
globals()[base] = module
namespace[base] = module
except (ImportError, SyntaxError):
return False
else:
return False
for attr in attr_list:
try:
attr_not_found = not hasattr(eval(base, namespace), attr)
except SyntaxError:
return False
if attr_not_found:
if force_import:
try:
__import__(base+'.'+attr, globals(), namespace)
except (ImportError, SyntaxError):
return False
else:
return False
base += '.'+attr
return True
if __name__ == "__main__":
class Test(object):
def method(self, x, y=2, (u, v, w)=(None, 0, 0)):
pass
print getargtxt(Test.__init__)
print getargtxt(Test.method)
print isdefined('numpy.take', force_import=True)
print isdefined('__import__')
print isdefined('.keys', force_import=True)
print getobj('globals')
print getobj('globals().keys')
print getobj('+scipy.signal.')
print getobj('4.')
print getdoc(sorted)
print getargtxt(sorted)
|
koll00/Gui_SM
|
SMlib/utils/dochelpers.py
|
Python
|
mit
| 10,892
|
[
"VTK"
] |
de100036c8f6b7e8ffdb5cbf1a6ca6b395e599bad7de1da166bf26783c7729b8
|
# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""exceptions handling (raising, catching, exceptions classes) checker
"""
import sys
from ..logilab.common.compat import builtins
BUILTINS_NAME = builtins.__name__
from .. import astroid
from ..astroid import YES, Instance, unpack_infer
from . import BaseChecker
from .utils import is_empty, is_raising, check_messages
from ..interfaces import IAstroidChecker
OVERGENERAL_EXCEPTIONS = ('Exception',)
MSGS = {
'E0701': ('Bad except clauses order (%s)',
'bad-except-order',
'Used when except clauses are not in the correct order (from the '
'more specific to the more generic). If you don\'t fix the order, '
              'some exceptions may not be caught by the most specific handler.'),
'E0702': ('Raising %s while only classes, instances or string are allowed',
'raising-bad-type',
'Used when something which is neither a class, an instance or a \
string is raised (i.e. a `TypeError` will be raised).'),
'E0710': ('Raising a new style class which doesn\'t inherit from BaseException',
'raising-non-exception',
'Used when a new style class which doesn\'t inherit from \
BaseException is raised.'),
'E0711': ('NotImplemented raised - should raise NotImplementedError',
'notimplemented-raised',
'Used when NotImplemented is raised instead of \
NotImplementedError'),
'W0701': ('Raising a string exception',
'raising-string',
'Used when a string exception is raised.'),
'W0702': ('No exception type(s) specified',
'bare-except',
'Used when an except clause doesn\'t specify exceptions type to \
catch.'),
'W0703': ('Catching too general exception %s',
'broad-except',
'Used when an except catches a too general exception, \
possibly burying unrelated errors.'),
'W0704': ('Except doesn\'t do anything',
'pointless-except',
'Used when an except clause does nothing but "pass" and there is\
no "else" clause.'),
'W0710': ('Exception doesn\'t inherit from standard "Exception" class',
'nonstandard-exception',
'Used when a custom exception class is raised but doesn\'t \
inherit from the builtin "Exception" class.'),
'W0711': ('Exception to catch is the result of a binary "%s" operation',
'binary-op-exception',
'Used when the exception to catch is of the form \
"except A or B:". If intending to catch multiple, \
rewrite as "except (A, B):"'),
'W0712': ('Implicit unpacking of exceptions is not supported in Python 3',
'unpacking-in-except',
'Python3 will not allow implicit unpacking of exceptions in except '
'clauses. '
'See http://www.python.org/dev/peps/pep-3110/',
{'maxversion': (3, 0)}),
}
if sys.version_info < (3, 0):
EXCEPTIONS_MODULE = "exceptions"
else:
EXCEPTIONS_MODULE = "builtins"
class ExceptionsChecker(BaseChecker):
"""checks for
* excepts without exception filter
* type of raise argument : string, Exceptions, other values
"""
__implements__ = IAstroidChecker
name = 'exceptions'
msgs = MSGS
priority = -4
options = (('overgeneral-exceptions',
{'default' : OVERGENERAL_EXCEPTIONS,
'type' :'csv', 'metavar' : '<comma-separated class names>',
'help' : 'Exceptions that will emit a warning '
'when being caught. Defaults to "%s"' % (
', '.join(OVERGENERAL_EXCEPTIONS),)}
),
)
@check_messages('W0701', 'W0710', 'E0702', 'E0710', 'E0711')
def visit_raise(self, node):
"""visit raise possibly inferring value"""
# ignore empty raise
if node.exc is None:
return
expr = node.exc
if self._check_raise_value(node, expr):
return
else:
try:
value = unpack_infer(expr).next()
except astroid.InferenceError:
return
self._check_raise_value(node, value)
def _check_raise_value(self, node, expr):
"""check for bad values, string exception and class inheritance
"""
value_found = True
if isinstance(expr, astroid.Const):
value = expr.value
if isinstance(value, str):
self.add_message('W0701', node=node)
else:
self.add_message('E0702', node=node,
args=value.__class__.__name__)
elif (isinstance(expr, astroid.Name) and \
expr.name in ('None', 'True', 'False')) or \
isinstance(expr, (astroid.List, astroid.Dict, astroid.Tuple,
astroid.Module, astroid.Function)):
self.add_message('E0702', node=node, args=expr.name)
elif ( (isinstance(expr, astroid.Name) and expr.name == 'NotImplemented')
or (isinstance(expr, astroid.CallFunc) and
isinstance(expr.func, astroid.Name) and
expr.func.name == 'NotImplemented') ):
self.add_message('E0711', node=node)
elif isinstance(expr, astroid.BinOp) and expr.op == '%':
self.add_message('W0701', node=node)
elif isinstance(expr, (Instance, astroid.Class)):
if isinstance(expr, Instance):
expr = expr._proxied
if (isinstance(expr, astroid.Class) and
not inherit_from_std_ex(expr) and
expr.root().name != BUILTINS_NAME):
if expr.newstyle:
self.add_message('E0710', node=node)
else:
self.add_message('W0710', node=node)
else:
value_found = False
else:
value_found = False
return value_found
@check_messages('W0712')
def visit_excepthandler(self, node):
"""Visit an except handler block and check for exception unpacking."""
if isinstance(node.name, (astroid.Tuple, astroid.List)):
self.add_message('W0712', node=node)
@check_messages('W0702', 'W0703', 'W0704', 'W0711', 'E0701')
def visit_tryexcept(self, node):
"""check for empty except"""
exceptions_classes = []
nb_handlers = len(node.handlers)
for index, handler in enumerate(node.handlers):
# single except doing nothing but "pass" without else clause
if nb_handlers == 1 and is_empty(handler.body) and not node.orelse:
self.add_message('W0704', node=handler.type or handler.body[0])
if handler.type is None:
if nb_handlers == 1 and not is_raising(handler.body):
self.add_message('W0702', node=handler)
# check if a "except:" is followed by some other
# except
elif index < (nb_handlers - 1):
msg = 'empty except clause should always appear last'
self.add_message('E0701', node=node, args=msg)
elif isinstance(handler.type, astroid.BoolOp):
self.add_message('W0711', node=handler, args=handler.type.op)
else:
try:
excs = list(unpack_infer(handler.type))
except astroid.InferenceError:
continue
for exc in excs:
# XXX skip other non class nodes
if exc is YES or not isinstance(exc, astroid.Class):
continue
exc_ancestors = [anc for anc in exc.ancestors()
if isinstance(anc, astroid.Class)]
for previous_exc in exceptions_classes:
if previous_exc in exc_ancestors:
msg = '%s is an ancestor class of %s' % (
previous_exc.name, exc.name)
self.add_message('E0701', node=handler.type, args=msg)
if (exc.name in self.config.overgeneral_exceptions
and exc.root().name == EXCEPTIONS_MODULE
and nb_handlers == 1 and not is_raising(handler.body)):
self.add_message('W0703', args=exc.name, node=handler.type)
exceptions_classes += excs
def inherit_from_std_ex(node):
"""return true if the given class node is subclass of
exceptions.Exception
"""
if node.name in ('Exception', 'BaseException') \
and node.root().name == EXCEPTIONS_MODULE:
return True
for parent in node.ancestors(recurs=False):
if inherit_from_std_ex(parent):
return True
return False
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(ExceptionsChecker(linter))
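
# Illustrative note (not part of the original checker): the shape of code that triggers
# W0711 ("binary-op-exception"). "except A or B:" evaluates the boolean expression first,
# so only one class is ever caught; the message suggests catching a tuple instead,
# e.g. "except (A, B):".
#
#     try:
#         risky()
#     except ValueError or TypeError:   # flagged by W0711; only ValueError is caught
#         pass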
|
lukaszpiotr/pylama_with_gjslint
|
pylama/checkers/pylint/checkers/exceptions.py
|
Python
|
lgpl-3.0
| 10,006
|
[
"VisIt"
] |
df8bf2d78bca24aeac0965ddf879aeb94aa95a814cfae0c2c45a26ad8a870e99
|
#
# Copyright 2015 by Justin MacCallum, Alberto Perez, Ken Dill
# All rights reserved
#
from .openmm_runner import OpenMMRunner
class ReplicaRunner(object):
def initialize(self):
pass
def minimize_then_run(self, state):
pass
def run(self, state):
pass
def get_energy(self, state):
pass
def set_alpha_and_timestep(self, state, timestep):
pass
class FakeSystemRunner(object):
'''
Fake runner for test purposes.
'''
def __init__(self, system, options, communicator=None):
self.temperature_scaler = system.temperature_scaler
def set_alpha_and_timestep(self, alpha, timestep):
pass
def minimize_then_run(self, state):
return state
def run(self, state):
return state
def get_energy(self, state):
return 0.
def get_runner(system, options, comm):
if options.runner == 'openmm':
return OpenMMRunner(system, options, comm)
elif options.runner == 'fake_runner':
return FakeSystemRunner(system, options, comm)
else:
raise RuntimeError('Unknown type of runner: {}'.format(options.runner))
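

# Illustrative sketch (not part of the original module): how get_runner() dispatches on
# options.runner. The SimpleNamespace stand-ins below are made up; real callers pass meld
# system and option objects, and this module is normally imported as part of the meld
# package rather than run directly.
if __name__ == '__main__':
    from types import SimpleNamespace

    fake_system = SimpleNamespace(temperature_scaler=None)
    fake_options = SimpleNamespace(runner='fake_runner')
    runner = get_runner(fake_system, fake_options, comm=None)
    print(runner.get_energy(state=None))  # FakeSystemRunner always reports 0.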
|
laufercenter/meld
|
meld/system/runner.py
|
Python
|
mit
| 1,159
|
[
"OpenMM"
] |
1247875182fb794e2df4a8a6e34d2bb4938e48663817636c5624dead586b55f3
|
########################################################################
# $HeadURL$
# File : BOINCComputingElement.py
# Author : J.Wu
########################################################################
""" BOINC Computing Element
"""
__RCSID__ = "$Id$"
from DIRAC.Resources.Computing.ComputingElement import ComputingElement
from DIRAC import S_OK, S_ERROR
import os, bz2, base64, tempfile
from urlparse import urlparse
CE_NAME = 'BOINC'
class BOINCComputingElement( ComputingElement ):
###############################################################################
def __init__( self, ceUniqueID ):
""" Standard constructor.
"""
ComputingElement.__init__( self, ceUniqueID )
self.ceType = CE_NAME
self.mandatoryParameters = []
self.wsdl = None
self.BOINCClient = None
#define a job prefix based on the wsdl url
self.suffix = None
    # this is for standalone test
# self.ceParameters['projectURL'] = 'http://mardirac3.in2p3.fr:7788/?wsdl'
# self.ceParameters['Platform'] = 'Linux_x86_64_glibc-2.5'
###############################################################################
def createClient( self ):
"""
    This method can only be called after this class has been initialised. It
    initialises some variables and creates a SOAP client for communication
    with the BOINC server.
"""
if not self.wsdl:
self.wsdl = self.ceParameters['projectURL']
if not self.suffix:
result = urlparse(self.wsdl)
self.suffix = result.hostname
if not self.BOINCClient:
try:
from suds.client import Client
import logging
logging.basicConfig(format="%(asctime)-15s %(message)s")
self.BOINCClient = Client(self.wsdl)
except Exception, x:
self.log.error( 'Creation of the soap client failed: %s' % str( x ) )
pass
###############################################################################
def submitJob( self, executableFile, proxy = None, numberOfJobs = 1 ):
""" Method to submit job
"""
self.createClient( )
# Check if the client is ready
if not self.BOINCClient:
return S_ERROR( 'Soap client is not ready' )
self.log.verbose( "Executable file path: %s" % executableFile )
# if no proxy is supplied, the executable can be submitted directly
# otherwise a wrapper script is needed to get the proxy to the execution node
# The wrapper script makes debugging more complicated and thus it is
# recommended to transfer a proxy inside the executable if possible.
wrapperContent = ''
if proxy:
self.log.verbose( 'Setting up proxy for payload' )
compressedAndEncodedProxy = base64.encodestring( bz2.compress( proxy.dumpAllToString()['Value'] ) ).replace( '\n', '' )
compressedAndEncodedExecutable = base64.encodestring( bz2.compress( open( executableFile, "rb" ).read(), 9 ) ).replace( '\n', '' )
wrapperContent = """#!/bin/bash
/usr/bin/env python << EOF
# Wrapper script for executable and proxy
import os, tempfile, sys, base64, bz2, shutil
try:
workingDirectory = tempfile.mkdtemp( suffix = '_wrapper', prefix= 'TORQUE_' )
os.chdir( workingDirectory )
open( 'proxy', "w" ).write(bz2.decompress( base64.decodestring( "%(compressedAndEncodedProxy)s" ) ) )
open( '%(executable)s', "w" ).write(bz2.decompress( base64.decodestring( "%(compressedAndEncodedExecutable)s" ) ) )
os.chmod('proxy',0600)
os.chmod('%(executable)s',0700)
os.environ["X509_USER_PROXY"]=os.path.join(workingDirectory, 'proxy')
except Exception, x:
print >> sys.stderr, x
sys.exit(-1)
cmd = "./%(executable)s"
print 'Executing: ', cmd
sys.stdout.flush()
os.system( cmd )
shutil.rmtree( workingDirectory )
EOF
""" % { 'compressedAndEncodedProxy': compressedAndEncodedProxy, \
'compressedAndEncodedExecutable': compressedAndEncodedExecutable, \
'executable': os.path.basename( executableFile ) }
fd, name = tempfile.mkstemp( suffix = '_pilotwrapper.py', prefix = 'DIRAC_', dir = os.getcwd() )
os.close( fd )
submitFile = name
else: # no proxy
submitFile = executableFile
wrapperContent = self._fromFileToStr( submitFile )
if not wrapperContent:
self.log.error( 'Executable file is empty.' )
return S_ERROR( 'Executable file is empty.' )
    # Some special symbols (such as less-than, greater-than and ampersand) cannot be
    # transported in XML, so base64 encoding is used here.
wrapperContent = base64.encodestring( wrapperContent ).replace( "\n",'')
prefix = os.path.splitext( os.path.basename( submitFile ) )[0].replace( '_pilotwrapper', '' ).replace( 'DIRAC_', '' )
batchIDList = []
stampDict = {}
for i in range( 0, numberOfJobs ):
jobID = "%s_%d@%s" % ( prefix, i, self.suffix)
try:
# print jobID + "\n" + wrapperContent
# print self.BOINCClient
result = self.BOINCClient.service.submitJob( jobID, wrapperContent,self.ceParameters['Platform'][0], self.ceParameters['MarketPlaceID'] )
except:
self.log.error( 'Could not submit the pilot %s to the BOINC CE %s, communication failed!' % (jobID, self.wsdl ))
        break
if not result['ok']:
self.log.warn( 'Didn\'t submit the pilot %s to the BOINC CE %s, the value returned is false!' % (jobID,
self.wsdl ))
        break
      self.log.verbose( 'Submitted the pilot %s to the BOINC CE %s' % (jobID, self.wsdl) )
diracStamp = "%s_%d" % ( prefix, i )
batchIDList.append( jobID )
stampDict[jobID] = diracStamp
if batchIDList:
resultRe = S_OK( batchIDList )
resultRe['PilotStampDict'] = stampDict
else:
resultRe = S_ERROR('Submit no pilot to BOINC CE %s' % self.wsdl)
return resultRe
#############################################################################
def getCEStatus( self ):
""" Method to get the BONIC CE dynamic jobs information.
"""
self.createClient( )
# Check if the client is ready
if not self.BOINCClient:
self.log.error( 'Soap client is not ready.' )
return S_ERROR( 'Soap client is not ready.' )
try:
result = self.BOINCClient.service.getDynamicInfo( )
except:
self.log.error( 'Could not get the BOINC CE %s dynamic jobs information, communication failed!' % self.wsdl )
return S_ERROR( 'Could not get the BOINC CE %s dynamic jobs information, communication failed!' % self.wsdl )
if not result['ok']:
      self.log.warn( 'Did not get the BOINC CE %s dynamic jobs information, the value returned is false!' % self.wsdl )
      return S_ERROR( 'Did not get the BOINC CE %s dynamic jobs information, the value returned is false!' % self.wsdl )
self.log.verbose( 'Get the BOINC CE %s dynamic jobs info.' % self.wsdl )
resultRe = S_OK()
resultRe['WaitingJobs'] = result['values'][0][0]
resultRe['RunningJobs'] = result['values'][0][1]
resultRe['SubmittedJobs'] = 0
self.log.verbose( 'Waiting Jobs: ', resultRe['WaitingJobs'] )
self.log.verbose( 'Running Jobs: ', resultRe['RunningJobs'] )
return resultRe
#############################################################################
def getJobStatus( self, jobIDList ):
""" Get the status information about jobs in the given list
"""
self.createClient( )
# Check if the client is ready
if not self.BOINCClient:
self.log.error( 'Soap client is not ready.' )
return S_ERROR( 'Soap client is not ready.' )
wsdl_jobIDList = self.BOINCClient.factory.create( 'stringArray' )
for job in jobIDList :
try:
job = job.split("@")[0]
except:
self.log.debug("The job id is %s" % job)
pass
wsdl_jobIDList[0].append( job )
try:
result = self.BOINCClient.service.getJobStatus( wsdl_jobIDList )
except:
      self.log.error( 'Could not get the status about jobs in the list from the BOINC CE %s, communication failed!' % self.wsdl )
      return S_ERROR( 'Could not get the status about jobs in the list from the BOINC CE %s, communication failed!' % self.wsdl )
if not result['ok']:
      self.log.warn( 'Did not get the status about jobs in the list from the BOINC CE %s, the value returned is false!' % self.wsdl )
      return S_ERROR( 'Did not get the status about jobs in the list from the BOINC CE %s, the value returned is false!' % self.wsdl )
    self.log.debug( 'Got the status about jobs in the list from the BOINC CE %s.' % self.wsdl )
resultRe = { }
for jobStatus in result['values'][0]:
(jobID, status) = jobStatus.split(":")
jobID = "%s@%s" % ( jobID, self.suffix)
resultRe[jobID] = status
return S_OK( resultRe )
#############################################################################
def getJobOutput( self, jobID, localDir = None ):
""" Get the stdout and stderr outputs of the specified job . If the localDir is provided,
the outputs are stored as files in this directory and the name of the files are returned.
Otherwise, the outputs are returned as strings.
"""
self.createClient( )
# Check if the client is ready
if not self.BOINCClient:
self.log.error( 'Soap client is not ready.' )
return S_ERROR( 'Soap client is not ready.' )
try:
tempID = jobID.split("@")[0]
except:
tempID = jobID
try:
result = self.BOINCClient.service.getJobOutput( tempID )
except:
      self.log.error( 'Could not get the outputs of job %s from the BOINC CE %s, communication failed!' % (jobID, self.wsdl) )
      return S_ERROR( 'Could not get the outputs of job %s from the BOINC CE %s, communication failed!' % (jobID, self.wsdl) )
if not result['ok']:
      self.log.warn( 'Did not get the outputs of job %s from the BOINC CE %s, the value returned is false!' % (jobID, self.wsdl) )
      return S_ERROR( 'Did not get the outputs of job %s from the BOINC CE %s, the value returned is false!' % (jobID, self.wsdl) )
    self.log.debug( 'Got the outputs of job %s from the BOINC CE %s.' % (jobID, self.wsdl) )
strOutfile = base64.decodestring( result['values'][0][0] )
strErrorfile = base64.decodestring( result['values'][0][1] )
if localDir:
outFile = os.path.join( localDir, 'BOINC_%s.out' % jobID )
self._fromStrToFile( strOutfile, outFile )
errorFile = os.path.join( localDir, 'BOINC_%s.err' % jobID )
self._fromStrToFile( strErrorfile, errorFile )
return S_OK( ( outFile, errorFile ) )
else:
# Return the outputs as a string
return S_OK( ( strOutfile, strErrorfile ) )
##############################################################################
def _fromFileToStr(self, fileName ):
""" Read a file and return the file content as a string
"""
    strFile = ''
    fileHandler = None
    if os.path.exists( fileName ):
      try:
        fileHandler = open( fileName, "r" )
        strFile = fileHandler.read()
      except:
        self.log.verbose( "Failed to read file %s!\n" % fileName )
      finally:
        if fileHandler:
          fileHandler.close()
    return strFile
#####################################################################
def _fromStrToFile(self, strContent, fileName ):
""" Write a string to a file
"""
    fileHandler = None
    try:
      fileHandler = open( fileName, "w" )
      fileHandler.write( strContent )
    except:
      self.log.verbose( "Failed to create %s!" % fileName )
    finally:
      if fileHandler:
        fileHandler.close()
# testing this
if __name__ == "__main__":
test_boinc = BOINCComputingElement( 12 )
test_submit = 1
test_getStatus = 2
test_getDynamic = 4
test_getOutput = 8
test_parameter = 4
jobID = 'zShvbK_0@mardirac3.in2p3.fr'
if test_parameter & test_submit:
fd, fname = tempfile.mkstemp( suffix = '_pilotwrapper.py', prefix = 'DIRAC_', dir = "/home/client/dirac/data/" )
os.close( fd )
fd = open ( fname,"w" )
    fd.write('#!/usr/bin/env sh\necho \"I am standard out\" >&1 \necho \"I am standard error\" >&2 ')
fd.close()
result = test_boinc.submitJob( fname )
if not result['OK']:
print result['Message']
else:
jobID = result['Value'][0]
print "Successfully submit a job %s" % jobID
if test_parameter & test_getStatus:
jobTestList = ["Uu0ghO_0@mardirac3.in2p3.fr", "1aDmIf_0@mardirac3.in2p3.fr", jobID]
jobStatus = test_boinc.getJobStatus( jobTestList )
if not jobStatus['OK']:
print jobStatus['Message']
else:
for id_ in jobTestList:
print 'The status of the job %s is %s' % (id_, jobStatus['Value'][id_])
if test_parameter & test_getDynamic:
serverState = test_boinc.getCEStatus()
if not serverState['OK']:
print serverState['Message']
else:
print 'The number of jobs waiting is %s' % serverState['WaitingJobs']
print 'The number of jobs running is %s' % serverState['RunningJobs']
if test_parameter & test_getOutput:
outstate = test_boinc.getJobOutput( jobID, "/tmp/" )
if not outstate['OK']:
print outstate['Message']
else:
print "Please check the directory /tmp for the output and error files of job %s" % jobID
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
|
avedaee/DIRAC
|
Resources/Computing/BOINCComputingElement.py
|
Python
|
gpl-3.0
| 13,464
|
[
"DIRAC"
] |
46b049e022bdea7d3444fb8e6ca379325a75132e3b43422b20f0cfd4737a4c06
|
"""
@name: Modules/Computer/Nodes/node_status.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2016-2020 by D. Brian Kimmel
@note: Created on Jul 15, 2016
@license: MIT License
@summary: Periodically check the other nodes and:
Send alerts when things happen
Delete node when dead for a week (send delete message too).
"""
__updated__ = '2020-01-24'
# ## END DBK
|
DBrianKimmel/PyHouse
|
Project/src/Modules/Computer/Nodes/node_status.py
|
Python
|
mit
| 445
|
[
"Brian"
] |
123a2b5f489ac7ef74f75b09a89fa39a5e5350d0c00090e79b0fe0d987562e58
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import numpy as np
import itertools
import espressomd.lb
np.random.seed(seed=40)
"""
Check linear momentum calculation for lattice-Boltzmann.
"""
AGRID = .5
EXT_FORCE = .1
VISC = 2.7
DENS = 1.7
TIME_STEP = 0.1
BOX_L = 3.0
LB_PARAMS = {'agrid': AGRID,
'dens': DENS,
'visc': VISC,
'tau': TIME_STEP,
'ext_force_density': [0.1, 0.2, 0.3]}
class LinearMomentumTest:
"""Base class of the test that holds the test logic."""
system = espressomd.System(box_l=[BOX_L] * 3)
system.time_step = TIME_STEP
system.cell_system.skin = 0.4 * AGRID
def setUp(self):
self.lbf = self.lb_class(**LB_PARAMS)
self.system.actors.add(self.lbf)
def tearDown(self):
self.system.actors.clear()
def test(self):
"""
Compare direct calculation of fluid momentum with analysis function.
"""
# setup random node velocities
for index in itertools.product(
np.arange(0, int(np.floor(BOX_L / AGRID))), repeat=3):
self.lbf[index].velocity = np.random.random(3) - 0.5
linear_momentum = np.zeros(3)
for index in itertools.product(
np.arange(0, int(np.floor(BOX_L / AGRID))), repeat=3):
linear_momentum += DENS * AGRID**3.0 * self.lbf[index].velocity
analyze_linear_momentum = self.system.analysis.linear_momentum(True, # particles
True) # LB fluid
np.testing.assert_allclose(
linear_momentum, analyze_linear_momentum, atol=self.atol)
@utx.skipIfMissingFeatures(['EXTERNAL_FORCES'])
class LBCPULinearMomentum(LinearMomentumTest, ut.TestCase):
"""Test for the CPU implementation of the LB."""
lb_class = espressomd.lb.LBFluid
atol = 1e-10
@utx.skipIfMissingGPU()
@utx.skipIfMissingFeatures(['LB_BOUNDARIES_GPU', 'EXTERNAL_FORCES'])
class LBGPULinearMomentum(LinearMomentumTest, ut.TestCase):
"""Test for the GPU implementation of the LB."""
lb_class = espressomd.lb.LBFluidGPU
atol = 1e-6
if __name__ == '__main__':
ut.main()
|
pkreissl/espresso
|
testsuite/python/linear_momentum_lb.py
|
Python
|
gpl-3.0
| 2,919
|
[
"ESPResSo"
] |
4ce4f74d18ff382675b6ce8be275e04567f52bf2e27b0c141db756eeaa320a98
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RGenomicalignments(RPackage):
"""Provides efficient containers for storing and manipulating short genomic
alignments (typically obtained by aligning short reads to a reference
genome). This includes read counting, computing the coverage, junction
detection, and working with the nucleotide content of the alignments."""
homepage = "https://bioconductor.org/packages/GenomicAlignments/"
url = "https://git.bioconductor.org/packages/GenomicAlignments"
list_url = homepage
version('1.12.2', git='https://git.bioconductor.org/packages/GenomicAlignments', commit='b5d6f19e4a89b6c1c3e9e58e5ea4eb13870874ef')
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-s4vectors', type=('build', 'run'))
depends_on('r-iranges', type=('build', 'run'))
depends_on('r-genomeinfodb', type=('build', 'run'))
depends_on('r-genomicranges', type=('build', 'run'))
depends_on('r-summarizedexperiment', type=('build', 'run'))
depends_on('r-biostrings', type=('build', 'run'))
depends_on('r-rsamtools', type=('build', 'run'))
depends_on('r-biocparallel', type=('build', 'run'))
depends_on('r@3.4.0:3.4.9', when='@1.12.2')
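
    # Illustrative note (not part of the recipe): with Spack on the PATH, this package
    # would typically be installed and made available with:
    #
    #     spack install r-genomicalignments
    #     spack load r-genomicalignments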
|
lgarren/spack
|
var/spack/repos/builtin/packages/r-genomicalignments/package.py
|
Python
|
lgpl-2.1
| 2,452
|
[
"Bioconductor"
] |
b5284c3b1a10ca50a3611d02e94f5667f321e0a4a6ae4ae746c433a928f2db42
|
#!/usr/bin/env python3
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import vtk
import chigger
camera = vtk.vtkCamera()
camera.SetViewUp(0.1865, 0.6455, 0.7407)
camera.SetPosition(3.7586, -11.8847, 9.5357)
camera.SetFocalPoint(0.0000, 0.0000, 0.1250)
reader = chigger.exodus.ExodusReader('../input/mug_blocks_out.e')
exodus0 = chigger.exodus.ExodusSource(reader, block=['1'])
exodus0.update()
exodus1 = chigger.exodus.ExodusSource(reader, block=['76'], edges=True, edge_color=[1,0,0], edge_width=1)
exodus1.update()
result = chigger.base.ChiggerResult(exodus0, exodus1, variable='diffused', camera=camera)
window = chigger.RenderWindow(result, size=[300, 300], test=True)
window.update(); window.resetCamera()
window.write('edge.png')
window.start()
|
nuclear-wizard/moose
|
python/chigger/tests/edge/edge.py
|
Python
|
lgpl-2.1
| 1,051
|
[
"MOOSE",
"VTK"
] |
a46bbc55455862351e2ad8df4106107df0d186b8813ee60a3bf27bfb534e7125
|
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
from starthinker_ui.recipe.scripts import Script
from starthinker.util.recipe import json_get_fields, dict_to_string, fields_to_string, recipe_markdown_text
AIRFLOW_TEMPLATE = """###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see starthinker/scripts for possible source):
# - Command: "python starthinker_ui/manage.py airflow"
#
###########################################################################
\'\'\'
--------------------------------------------------------------
Before running this Airflow module...
Install StarThinker in cloud composer ( recommended ):
From Release: pip install starthinker
From Open Source: pip install git+https://github.com/google/starthinker
Or push local code to the cloud composer plugins directory ( if pushing local code changes ):
source install/deploy.sh
4) Composer Menu
l) Install All
--------------------------------------------------------------
If any recipe task has "auth" set to "user" add user credentials:
1. Ensure an RECIPE['setup']['auth']['user'] = [User Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_user", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/deploy_commandline.md#optional-setup-user-credentials
--------------------------------------------------------------
If any recipe task has "auth" set to "service" add service credentials:
1. Ensure a RECIPE['setup']['auth']['service'] = [Service Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_service", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md
--------------------------------------------------------------
{title}
{description}
{instructions}
{links}
--------------------------------------------------------------
This StarThinker DAG can be extended with any additional tasks from the following sources:
- https://google.github.io/starthinker/
- https://github.com/google/starthinker/tree/master/dags
\'\'\'
from starthinker.airflow.factory import DAG_Factory
INPUTS = {inputs}
RECIPE = {recipe}
dag_maker = DAG_Factory('{dag}', RECIPE, INPUTS)
dag = dag_maker.generate()
if __name__ == "__main__":
dag_maker.print_commandline()
"""
def script_to_dag(dag_name,
title,
description,
instructions,
script,
parameters={}):
markdown_description = recipe_markdown_text(description)
formatted_description = markdown_description[0] + '\n\n'
markdown_instructions = recipe_markdown_text(' - ' + '\n - '.join(instructions))
formatted_instructions = markdown_instructions[0]
formatted_description_links = '\n'.join([' {}-{}: {}'.format(i, f[0], f[1]) for i,f in enumerate(markdown_description[1], 1)])
if len(formatted_description_links) > 0:
formatted_description_links = formatted_description_links + '\n'
formatted_instruction_links = '\n'.join([' {}-{}: {}'.format(i, f[0], f[1]) for i,f in enumerate(markdown_instructions[1], 1)])
formatted_links = formatted_description_links + formatted_instruction_links
return AIRFLOW_TEMPLATE.format(**{
'title':title,
'description':formatted_description,
'instructions':formatted_instructions,
'links':formatted_links,
'inputs':fields_to_string(
json_get_fields(script),
parameters
),
'recipe':dict_to_string(
script,
skip=('field',)
),
'dag':dag_name
})
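# A minimal usage sketch (not part of the original module; the file names and
# recipe content below are hypothetical): load a recipe script as a dict,
# render the Airflow module text, and write it into an Airflow dags/ folder.
#
#   import json
#   script = json.load(open('scripts/say_hello.json'))  # hypothetical recipe file
#   dag_text = script_to_dag(
#       dag_name='say_hello',
#       title='Say Hello',
#       description='Writes a greeting.',
#       instructions=['Run once to verify credentials.'],
#       script=script,
#   )
#   open('dags/say_hello_dag.py', 'w').write(dag_text)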
|
google/starthinker
|
starthinker_ui/recipe/dag.py
|
Python
|
apache-2.0
| 5,492
|
[
"VisIt"
] |
1b60f94873430a111972501a8e2441524678be5e2a35be3dbbafa7def3979b03
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyEspressopp(CMakePackage):
"""ESPResSo++ is an extensible, flexible, fast and parallel simulation
software for soft matter research. It is a highly versatile software
package for the scientific simulation and analysis of coarse-grained
atomistic or bead-spring models as they are used in soft matter research
"""
homepage = "https://espressopp.github.io"
url = "https://github.com/espressopp/espressopp/tarball/v1.9.4.1"
version('develop', git='https://github.com/espressopp/espressopp.git', branch='master')
version('1.9.5', '13a93c30b07132b5e5fa0d828aa17d79')
version('1.9.4.1', '0da74a6d4e1bfa6a2a24fca354245a4f')
version('1.9.4', 'f2a27993a83547ad014335006eea74ea')
variant('ug', default=False, description='Build user guide')
variant('pdf', default=False, description='Build user guide in pdf format')
variant('dg', default=False, description='Build developer guide')
depends_on("cmake@2.8:", type='build')
depends_on("mpi")
depends_on("boost+serialization+filesystem+system+python+mpi", when='@1.9.4:')
extends("python")
depends_on("python@2:2.8")
depends_on("py-mpi4py@2.0.0:", when='@1.9.4', type=('build', 'run'))
depends_on("py-mpi4py@1.3.1:", when='@1.9.4.1:', type=('build', 'run'))
depends_on("fftw")
depends_on("py-sphinx", when="+ug", type='build')
depends_on("py-sphinx", when="+pdf", type='build')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-matplotlib', when="+ug", type='build')
depends_on('py-matplotlib', when="+pdf", type='build')
depends_on("texlive", when="+pdf", type='build')
depends_on("doxygen", when="+dg", type='build')
def cmake_args(self):
return [
'-DEXTERNAL_MPI4PY=ON',
'-DEXTERNAL_BOOST=ON',
'-DWITH_RC_FILES=OFF'
]
def build(self, spec, prefix):
with working_dir(self.build_directory):
make()
if '+ug' in spec:
make("ug", parallel=False)
if '+pdf' in spec:
make("ug-pdf", parallel=False)
if '+dg' in spec:
make("doc", parallel=False)
|
lgarren/spack
|
var/spack/repos/builtin/packages/py-espressopp/package.py
|
Python
|
lgpl-2.1
| 3,451
|
[
"ESPResSo"
] |
aa48b1126dfcbe30ff21e2a63863ae3b07ee44dcdaec5e96dd7051fc385507f2
|
# -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <vincent.michel@inria.fr>
# Minor fixes by Fabian Pedregosa
# Amit Aides <amitibo@tx.technion.ac.il>
# Yehuda Finkelstein <yehudaf@tx.technion.ac.il>
# Lars Buitinck
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.special import logsumexp
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array, deprecated
from .utils.extmath import safe_sparse_dot
from .utils.multiclass import _check_partial_fit_first_call
from .utils.validation import check_is_fitted, check_non_negative, column_or_1d
from .utils.validation import _check_sample_weight
from .utils.validation import _deprecate_positional_args
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB', 'ComplementNB',
'CategoricalNB']
class _BaseNB(ClassifierMixin, BaseEstimator, metaclass=ABCMeta):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
shape (n_classes, n_samples).
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
@abstractmethod
def _check_X(self, X):
"""To be overridden in subclasses with the actual checks."""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : ndarray of shape (n_samples,)
Predicted target values for X
"""
check_is_fitted(self)
X = self._check_X(X)
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
"""
check_is_fitted(self)
X = self._check_X(X)
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(_BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via :meth:`partial_fit`.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Parameters
----------
priors : array-like of shape (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
var_smoothing : float, default=1e-9
Portion of the largest variance of all features that is added to
variances for calculation stability.
.. versionadded:: 0.20
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
number of training samples observed in each class.
class_prior_ : ndarray of shape (n_classes,)
probability of each class.
classes_ : ndarray of shape (n_classes,)
class labels known to the classifier
epsilon_ : float
absolute additive value to variances
sigma_ : ndarray of shape (n_classes, n_features)
Variance of each feature per class.
.. deprecated:: 1.0
`sigma_` is deprecated in 1.0 and will be removed in 1.2.
Use `var_` instead.
var_ : ndarray of shape (n_classes, n_features)
Variance of each feature per class.
.. versionadded:: 1.0
theta_ : ndarray of shape (n_classes, n_features)
mean of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB()
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
@_deprecate_positional_args
def __init__(self, *, priors=None, var_smoothing=1e-9):
self.priors = priors
self.var_smoothing = var_smoothing
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Gaussian Naive Bayes supports fitting with *sample_weight*.
Returns
-------
self : object
"""
X, y = self._validate_data(X, y)
y = column_or_1d(y, warn=True)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
def _check_X(self, X):
return check_array(X)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like of shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like of shape (number of Gaussians,)
Variances for Gaussians in original set.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like of shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like of shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
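# Equivalently, writing n = n_past + n_new, the pairwise update below is
#   total_var = (n_past*var + n_new*new_var
#                + (n_past*n_new/n) * (mu - new_mu)**2) / n
# i.e. the Chan/Golub/LeVeque combination referenced in the docstring above.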
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_new * n_past / n_total) * (mu - new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Returns
-------
self : object
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit : bool, default=False
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
X, y = check_X_y(X, y)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
# If the ratio of data variance between dimensions is too small, it
# will cause numerical errors. To address this, we artificially
# boost the variance by epsilon, a small fraction of the standard
# deviation of the largest dimension.
self.epsilon_ = self.var_smoothing * np.var(X, axis=0).max()
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.var_ = np.zeros((n_classes, n_features))
self.class_count_ = np.zeros(n_classes, dtype=np.float64)
# Initialise the class prior
# Take into account the priors
if self.priors is not None:
priors = np.asarray(self.priors)
# Check that the provided priors match the number of classes
if len(priors) != n_classes:
raise ValueError('Number of priors must match number of'
' classes.')
# Check that the sum is 1
if not np.isclose(priors.sum(), 1.0):
raise ValueError('The sum of the priors should be 1.')
# Check that the priors are non-negative
if (priors < 0).any():
raise ValueError('Priors must be non-negative.')
self.class_prior_ = priors
else:
# Initialize the priors to zeros for each class
self.class_prior_ = np.zeros(len(self.classes_),
dtype=np.float64)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Remove the epsilon added on the previous call; it is added back after the update below
self.var_[:, :] -= self.epsilon_
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = np.in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(unique_y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.var_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.var_[i, :] = new_sigma
self.class_count_[i] += N_i
self.var_[:, :] += self.epsilon_
# Update only if no priors were provided
if self.priors is None:
# Empirical prior, with sample_weight taken into account
self.class_prior_ = self.class_count_ / self.class_count_.sum()
return self
def _joint_log_likelihood(self, X):
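# For each class i this computes log P(y=i) plus the log density of X under a
# diagonal Gaussian with per-feature mean theta_[i] and variance var_[i]:
#   log P(y=i) - 0.5 * sum_j [ log(2*pi*var_ij) + (x_j - theta_ij)**2 / var_ij ]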
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.var_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.var_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
@deprecated( # type: ignore
"Attribute sigma_ was deprecated in 1.0 and will be removed in"
"1.2. Use var_ instead."
)
@property
def sigma_(self):
return self.var_
_ALPHA_MIN = 1e-10
class _BaseDiscreteNB(_BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per _BaseNB
"""
def _check_X(self, X):
return check_array(X, accept_sparse='csr')
def _check_X_y(self, X, y):
return self._validate_data(X, y, accept_sparse='csr')
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
with warnings.catch_warnings():
# silence the warning when count is 0 because class was not yet
# observed
warnings.simplefilter("ignore", RuntimeWarning)
log_class_count = np.log(self.class_count_)
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (log_class_count -
np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.full(n_classes, -np.log(n_classes))
def _check_alpha(self):
if np.min(self.alpha) < 0:
raise ValueError('Smoothing parameter alpha = %.1e. '
'alpha should be > 0.' % np.min(self.alpha))
if isinstance(self.alpha, np.ndarray):
if not self.alpha.shape[0] == self.n_features_:
raise ValueError("alpha should be a scalar or a numpy array "
"with shape [n_features]")
if np.min(self.alpha) < _ALPHA_MIN:
warnings.warn('alpha too small will result in numeric errors, '
'setting alpha = %.1e' % _ALPHA_MIN)
return np.maximum(self.alpha, _ALPHA_MIN)
return self.alpha
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
X, y = self._check_X_y(X, y)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self._init_counters(n_effective_classes, n_features)
self.n_features_ = n_features
elif n_features != self.n_features_:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (n_features, self.n_features_))
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64, copy=False)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
sample_weight = np.atleast_2d(sample_weight)
Y *= sample_weight.T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
alpha = self._check_alpha()
self._update_feature_log_prob(alpha)
self._update_class_log_prior(class_prior=class_prior)
return self
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
X, y = self._check_X_y(X, y)
_, n_features = X.shape
self.n_features_ = n_features
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
if sample_weight is not None:
Y = Y.astype(np.float64, copy=False)
sample_weight = _check_sample_weight(sample_weight, X)
sample_weight = np.atleast_2d(sample_weight)
Y *= sample_weight.T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self._init_counters(n_effective_classes, n_features)
self._count(X, Y)
alpha = self._check_alpha()
self._update_feature_log_prob(alpha)
self._update_class_log_prior(class_prior=class_prior)
return self
def _init_counters(self, n_effective_classes, n_features):
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
# mypy error: Decorated property not supported
@deprecated("Attribute coef_ was deprecated in " # type: ignore
"version 0.24 and will be removed in 1.1 (renaming of 0.26).")
@property
def coef_(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
# mypy error: Decorated property not supported
@deprecated("Attribute intercept_ was deprecated in " # type: ignore
"version 0.24 and will be removed in 1.1 (renaming of 0.26).")
@property
def intercept_(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
def _more_tags(self):
return {'poor_score': True}
class MultinomialNB(_BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float, default=1.0
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : bool, default=True
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes, )
Smoothed empirical log probability for each class.
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
coef_ : ndarray of shape (n_classes, n_features)
Mirrors ``feature_log_prob_`` for interpreting `MultinomialNB`
as a linear model.
.. deprecated:: 0.24
``coef_`` is deprecated in 0.24 and will be removed in 1.1
(renaming of 0.26).
feature_count_ : ndarray of shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
feature_log_prob_ : ndarray of shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
intercept_ : ndarray of shape (n_classes,)
Mirrors ``class_log_prior_`` for interpreting `MultinomialNB`
as a linear model.
.. deprecated:: 0.24
``intercept_`` is deprecated in 0.24 and will be removed in 1.1
(renaming of 0.26).
n_features_ : int
Number of features of each sample.
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB()
>>> print(clf.predict(X[2:3]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
https://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
@_deprecate_positional_args
def __init__(self, *, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _more_tags(self):
return {'requires_positive_X': True}
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
check_non_negative(X, "MultinomialNB (input X)")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc) -
np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
return (safe_sparse_dot(X, self.feature_log_prob_.T) +
self.class_log_prior_)
class ComplementNB(_BaseDiscreteNB):
"""The Complement Naive Bayes classifier described in Rennie et al. (2003).
The Complement Naive Bayes classifier was designed to correct the "severe
assumptions" made by the standard Multinomial Naive Bayes classifier. It is
particularly suited for imbalanced data sets.
Read more in the :ref:`User Guide <complement_naive_bayes>`.
.. versionadded:: 0.20
Parameters
----------
alpha : float, default=1.0
Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing).
fit_prior : bool, default=True
Only used in edge case with a single class in the training set.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. Not used.
norm : bool, default=False
Whether or not a second normalization of the weights is performed. The
default behavior mirrors the implementations found in Mahout and Weka,
which do not follow the full algorithm described in Table 9 of the
paper.
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes,)
Smoothed empirical log probability for each class. Only used in edge
case with a single class in the training set.
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
coef_ : ndarray of shape (n_classes, n_features)
Mirrors ``feature_log_prob_`` for interpreting `ComplementNB`
as a linear model.
.. deprecated:: 0.24
``coef_`` is deprecated in 0.24 and will be removed in 1.1
(renaming of 0.26).
feature_all_ : ndarray of shape (n_features,)
Number of samples encountered for each feature during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : ndarray of shape (n_classes, n_features)
Number of samples encountered for each (class, feature) during fitting.
This value is weighted by the sample weight when provided.
feature_log_prob_ : ndarray of shape (n_classes, n_features)
Empirical weights for class complements.
intercept_ : ndarray of shape (n_classes,)
Mirrors ``class_log_prior_`` for interpreting `ComplementNB`
as a linear model.
.. deprecated:: 0.24
``intercept_`` is deprecated in 0.24 and will be removed in 1.1
(renaming of 0.26).
n_features_ : int
Number of features of each sample.
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import ComplementNB
>>> clf = ComplementNB()
>>> clf.fit(X, y)
ComplementNB()
>>> print(clf.predict(X[2:3]))
[3]
References
----------
Rennie, J. D., Shih, L., Teevan, J., & Karger, D. R. (2003).
Tackling the poor assumptions of naive bayes text classifiers. In ICML
(Vol. 3, pp. 616-623).
https://people.csail.mit.edu/jrennie/papers/icml03-nb.pdf
"""
@_deprecate_positional_args
def __init__(self, *, alpha=1.0, fit_prior=True, class_prior=None,
norm=False):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
self.norm = norm
def _more_tags(self):
return {'requires_positive_X': True}
def _count(self, X, Y):
"""Count feature occurrences."""
check_non_negative(X, "ComplementNB (input X)")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
self.feature_all_ = self.feature_count_.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and compute the weights."""
comp_count = self.feature_all_ + alpha - self.feature_count_
logged = np.log(comp_count / comp_count.sum(axis=1, keepdims=True))
# _BaseNB.predict uses argmax, but ComplementNB operates with argmin.
if self.norm:
summed = logged.sum(axis=1, keepdims=True)
feature_log_prob = logged / summed
else:
feature_log_prob = -logged
self.feature_log_prob_ = feature_log_prob
def _joint_log_likelihood(self, X):
"""Calculate the class scores for the samples in X."""
jll = safe_sparse_dot(X, self.feature_log_prob_.T)
if len(self.classes_) == 1:
jll += self.class_log_prior_
return jll
class BernoulliNB(_BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float, default=1.0
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, default=0.0
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : bool, default=True
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_count_ : ndarray of shape (n_classes)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes)
Log probability of each class (smoothed).
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
coef_ : ndarray of shape (n_classes, n_features)
Mirrors ``feature_log_prob_`` for interpreting `BernoulliNB`
as a linear model.
feature_count_ : ndarray of shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
feature_log_prob_ : ndarray of shape (n_classes, n_features)
Empirical log probability of features given a class, P(x_i|y).
intercept_ : ndarray of shape (n_classes,)
Mirrors ``class_log_prior_`` for interpreting `BernoulliNB`
as a linear model.
n_features_ : int
Number of features of each sample.
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB()
>>> print(clf.predict(X[2:3]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
https://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
@_deprecate_positional_args
def __init__(self, *, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _check_X(self, X):
X = super()._check_X(X)
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
return X
def _check_X_y(self, X, y):
X, y = super()._check_X_y(X, y)
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
return X, y
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + alpha
smoothed_cc = self.class_count_ + alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc) -
np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
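# This follows from expanding the Bernoulli log-likelihood per feature:
#   sum_i [ x_i*log(p_i) + (1 - x_i)*log(1 - p_i) ]
#     = sum_i log(1 - p_i) + sum_i x_i*(log(p_i) - log(1 - p_i))
# so only one sparse dot product with X is needed.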
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
class CategoricalNB(_BaseDiscreteNB):
"""Naive Bayes classifier for categorical features
The categorical Naive Bayes classifier is suitable for classification with
discrete features that are categorically distributed. The categories of
each feature are drawn from a categorical distribution.
Read more in the :ref:`User Guide <categorical_naive_bayes>`.
Parameters
----------
alpha : float, default=1.0
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : bool, default=True
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
min_categories : int or array-like of shape (n_features,), default=None
Minimum number of categories per feature.
- integer: Sets the minimum number of categories per feature to
`n_categories` for each feature.
- array-like: shape (n_features,) where `n_categories[i]` holds the
minimum number of categories for the ith column of the input.
- None (default): Determines the number of categories automatically
from the training data.
.. versionadded:: 0.24
Attributes
----------
category_count_ : list of arrays of shape (n_features,)
Holds arrays of shape (n_classes, n_categories of respective feature)
for each feature. Each array provides the number of samples
encountered for each class and category of the specific feature.
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes,)
Smoothed empirical log probability for each class.
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
feature_log_prob_ : list of arrays of shape (n_features,)
Holds arrays of shape (n_classes, n_categories of respective feature)
for each feature. Each array provides the empirical log probability
of categories given the respective feature and class, ``P(x_i|y)``.
n_features_ : int
Number of features of each sample.
n_categories_ : ndarray of shape (n_features,), dtype=np.int64
Number of categories for each feature. This value is
inferred from the data or set by the minimum number of categories.
.. versionadded:: 0.24
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import CategoricalNB
>>> clf = CategoricalNB()
>>> clf.fit(X, y)
CategoricalNB()
>>> print(clf.predict(X[2:3]))
[3]
"""
@_deprecate_positional_args
def __init__(self, *, alpha=1.0, fit_prior=True, class_prior=None,
min_categories=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
self.min_categories = min_categories
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features. Here, each feature of X is
assumed to be from a different categorical distribution.
It is further assumed that all categories of each feature are
represented by the numbers 0, ..., n - 1, where n refers to the
total number of categories for the given feature. This can, for
instance, be achieved with the help of OrdinalEncoder.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
return super().fit(X, y, sample_weight=sample_weight)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features. Here, each feature of X is
assumed to be from a different categorical distribution.
It is further assumed that all categories of each feature are
represented by the numbers 0, ..., n - 1, where n refers to the
total number of categories for the given feature. This can, for
instance, be achieved with the help of OrdinalEncoder.
y : array-like of shape (n_samples)
Target values.
classes : array-like of shape (n_classes), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like of shape (n_samples), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
return super().partial_fit(X, y, classes,
sample_weight=sample_weight)
def _more_tags(self):
return {'requires_positive_X': True}
def _check_X(self, X):
X = check_array(X, dtype='int', accept_sparse=False,
force_all_finite=True)
check_non_negative(X, "CategoricalNB (input X)")
return X
def _check_X_y(self, X, y):
X, y = self._validate_data(X, y, dtype='int', accept_sparse=False,
force_all_finite=True)
check_non_negative(X, "CategoricalNB (input X)")
return X, y
def _init_counters(self, n_effective_classes, n_features):
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.category_count_ = [np.zeros((n_effective_classes, 0))
for _ in range(n_features)]
@staticmethod
def _validate_n_categories(X, min_categories):
# categories are encoded as 0..n-1, so the per-feature max value + 1 gives the number of categories
n_categories_X = X.max(axis=0) + 1
min_categories_ = np.array(min_categories)
if min_categories is not None:
if not np.issubdtype(min_categories_.dtype, np.signedinteger):
raise ValueError(
f"'min_categories' should have integral type. Got "
f"{min_categories_.dtype} instead."
)
n_categories_ = np.maximum(n_categories_X,
min_categories_,
dtype=np.int64)
if n_categories_.shape != n_categories_X.shape:
raise ValueError(
f"'min_categories' should have shape ({X.shape[1]},"
f") when an array-like is provided. Got"
f" {min_categories_.shape} instead."
)
return n_categories_
else:
return n_categories_X
def _count(self, X, Y):
def _update_cat_count_dims(cat_count, highest_feature):
diff = highest_feature + 1 - cat_count.shape[1]
if diff > 0:
# we append a column full of zeros for each new category
return np.pad(cat_count, [(0, 0), (0, diff)], 'constant')
return cat_count
def _update_cat_count(X_feature, Y, cat_count, n_classes):
for j in range(n_classes):
mask = Y[:, j].astype(bool)
if Y.dtype.type == np.int64:
weights = None
else:
weights = Y[mask, j]
counts = np.bincount(X_feature[mask], weights=weights)
indices = np.nonzero(counts)[0]
cat_count[j, indices] += counts[indices]
self.class_count_ += Y.sum(axis=0)
self.n_categories_ = self._validate_n_categories(
X, self.min_categories)
for i in range(self.n_features_):
X_feature = X[:, i]
self.category_count_[i] = _update_cat_count_dims(
self.category_count_[i], self.n_categories_[i] - 1)
_update_cat_count(X_feature, Y,
self.category_count_[i],
self.class_count_.shape[0])
def _update_feature_log_prob(self, alpha):
feature_log_prob = []
for i in range(self.n_features_):
smoothed_cat_count = self.category_count_[i] + alpha
smoothed_class_count = smoothed_cat_count.sum(axis=1)
feature_log_prob.append(
np.log(smoothed_cat_count) -
np.log(smoothed_class_count.reshape(-1, 1)))
self.feature_log_prob_ = feature_log_prob
def _joint_log_likelihood(self, X):
if not X.shape[1] == self.n_features_:
raise ValueError("Expected input with %d features, got %d instead"
% (self.n_features_, X.shape[1]))
jll = np.zeros((X.shape[0], self.class_count_.shape[0]))
for i in range(self.n_features_):
indices = X[:, i]
jll += self.feature_log_prob_[i][:, indices].T
total_ll = jll + self.class_log_prior_
return total_ll
|
xuewei4d/scikit-learn
|
sklearn/naive_bayes.py
|
Python
|
bsd-3-clause
| 49,553
|
[
"Gaussian"
] |
165477d6889b76cd806487c29c48813f298830441be5d0976f9befee787b2ec8
|
#!/usr/bin/env python
#
# License: BSD
# https://raw.githubusercontent.com/stonier/py_trees/devel/LICENSE
#
##############################################################################
# Documentation
##############################################################################
"""
The core behaviour template. All behaviours, standalone and composite, inherit
from this class.
"""
##############################################################################
# Imports
##############################################################################
import re
import uuid
from . import logging
from . import common
from .common import Status
##############################################################################
# Behaviour BluePrint
##############################################################################
class Behaviour(object):
"""
Defines the basic properties and methods required of a node in a
behaviour tree.
Uses all the whizbang tricks from coroutines and generators to do this
as optimally as you may in python. When implementing your own behaviour,
subclass this class.
Args:
name (:obj:`str`): the behaviour name
*args: variable length argument list.
**kwargs: arbitrary keyword arguments.
Attributes:
name (:obj:`str`): the behaviour name
status (:class:`~py_trees.common.Status`): the behaviour status (:data:`~py_trees.common.Status.INVALID`, :data:`~py_trees.common.Status.RUNNING`, :data:`~py_trees.common.Status.FAILURE`, :data:`~py_trees.common.Status.SUCCESS`)
parent (:class:`~py_trees.behaviour.Behaviour`): a :class:`~py_trees.composites.Composite` instance if nested in a tree, otherwise None
children ([:class:`~py_trees.behaviour.Behaviour`]): empty for regular behaviours, populated for composites
feedback_message(:obj:`str`): a simple message used to notify of significant happenings
blackbox_level (:class:`~py_trees.common.BlackBoxLevel`): a helper variable for dot graphs and runtime gui's to collapse/explode entire subtrees dependent upon the blackbox level.
.. seealso::
* :ref:`Skeleton Behaviour Template <skeleton-behaviour-include>`
* :ref:`The Lifecycle Demo <py-trees-demo-behaviour-lifecycle-program>`
* :ref:`The Action Behaviour Demo <py-trees-demo-action-behaviour-program>`
"""
def __init__(self, name="", *args, **kwargs):
try:
assert isinstance(name, basestring), "a behaviour name should be a string, but you passed in %s" % type(name) # python2 compatibility
except NameError:
assert isinstance(name, str), "a behaviour name should be a string, but you passed in %s" % type(name)
self.id = uuid.uuid4() # used to uniquely identify this node (helps with removing children from a tree)
self.name = name
self.status = Status.INVALID
self.iterator = self.tick()
self.parent = None # will get set if a behaviour is added to a composite
self.children = [] # only set by composite behaviours
self.logger = logging.Logger(name)
self.feedback_message = "" # useful for debugging, or human readable updates, but not necessary to implement
self.blackbox_level = common.BlackBoxLevel.NOT_A_BLACKBOX
############################################
# User Customisable Functions (virtual)
############################################
def setup(self, timeout):
"""
Subclasses may override this method to do any one-time delayed construction that
is necessary for runtime. This is best done here rather than in the constructor
so that trees can be instantiated on the fly without any severe runtime requirements
(e.g. a hardware sensor) on any pc to produce visualisations such as dot graphs.
.. note:: User Customisable Callback
Args:
timeout (:obj:`float`): time to wait (0.0 is blocking forever)
Returns:
:obj:`bool`: whether it timed out trying to setup
"""
return True
def initialise(self):
"""
.. note:: User Customisable Callback
Subclasses may override this method to perform any necessary initialising/clearing/resetting
of variables when preparing to enter this behaviour if it was not previously
:data:`~py_trees.common.Status.RUNNING`. i.e. Expect this to trigger more than once!
"""
pass
def terminate(self, new_status):
"""
.. note:: User Customisable Callback
Subclasses may override this method to clean up. It will be triggered when a behaviour either
finishes execution (switching from :data:`~py_trees.common.Status.RUNNING`
to :data:`~py_trees.common.Status.FAILURE` || :data:`~py_trees.common.Status.SUCCESS`)
or it got interrupted by a higher priority branch (switching to
:data:`~py_trees.common.Status.INVALID`). Remember that the :meth:`~py_trees.behaviour.Behaviour.initialise` method
will handle resetting of variables before re-entry, so this method is about
disabling resources until this behaviour's next tick. This could be an indeterminably
long time. e.g.
* cancel an external action that got started
* shut down any temporary communication handles
Args:
new_status (:class:`~py_trees.common.Status`): the behaviour is transitioning to this new status
.. warning:: Do not set `self.status = new_status` here, that is automatically handled
by the :meth:`~py_trees.behaviour.Behaviour.stop` method. Use the argument purely for introspection purposes (e.g.
comparing the current state in `self.status` with the state it will transition to in
`new_status`).
"""
pass
def update(self):
"""
.. note:: User Customisable Callback
Returns:
:class:`~py_trees.common.Status`: the behaviour's new status :class:`~py_trees.common.Status`
Subclasses may override this method to perform any logic required to
arrive at a decision on the behaviour's new status. It is the primary worker function called on
by the :meth:`~py_trees.behaviour.Behaviour.tick` mechanism.
.. tip:: This method should be almost instantaneous and non-blocking
"""
return Status.INVALID
############################################
# User Methods
############################################
def tick_once(self):
"""
A direct means of calling tick on this object without
using the generator mechanism.
"""
# no logger necessary here...it directly relays to tick
for unused in self.tick():
pass
############################################
# Workers
############################################
def has_parent_with_name(self, name):
"""
Searches through this behaviour's parents, and their parents, looking for
a behaviour with the same name as that specified.
Args:
name (:obj:`str`): name of the parent to match, can be a regular expression
Returns:
bool: whether a parent was found or not
"""
pattern = re.compile(name)
b = self
while b.parent is not None:
if pattern.match(b.parent.name) is not None:
return True
b = b.parent
return False
def has_parent_with_instance_type(self, instance_type):
"""
Moves up through this behaviour's parents looking for
a behaviour with the same instance type as that specified.
Args:
instance_type (:obj:`str`): instance type of the parent to match
Returns:
bool: whether a parent was found or not
"""
b = self
while b.parent is not None:
if isinstance(b.parent, instance_type):
return True
b = b.parent
return False
def tip(self):
"""
Get the *tip* of this behaviour's subtree (if it has one) after its last
tick. This corresponds to the deepest node that was running before the
subtree traversal reversed direction and headed back to this node.
Returns:
:class:`~py_trees.behaviour.Behaviour` or :obj:`None`: child behaviour, itself or :obj:`None` if its status is :data:`~py_trees.common.Status.INVALID`
"""
return self if self.status != Status.INVALID else None
def visit(self, visitor):
"""
This is functionality that enables external introspection into the behaviour. It gets used
by the tree manager classes to collect information as ticking traverses a tree.
Args:
visitor (:obj:`object`): the visiting class, must have a run(:class:`~py_trees.behaviour.Behaviour`) method.
"""
visitor.run(self)
def tick(self):
"""
This function is a generator that can be used by an iterator on
an entire behaviour tree. It handles the logic for deciding when to
call the user's :meth:`~py_trees.behaviour.Behaviour.initialise` and :meth:`~py_trees.behaviour.Behaviour.terminate` methods as well as making the
actual call to the user's :meth:`~py_trees.behaviour.Behaviour.update` method that determines the
behaviour's new status once the tick has finished. Once done, it will
then yield itself (generator mechanism) so that it can be used as part of
an iterator for the entire tree.
.. code-block:: python
for node in my_behaviour.tick():
print("Do something")
.. note::
This is a generator function, you must use this with *yield*. If you need a direct call,
prefer :meth:`~py_trees.behaviour.Behaviour.tick_once` instead.
Yields:
:class:`~py_trees.behaviour.Behaviour`: a reference to itself
"""
self.logger.debug("%s.tick()" % (self.__class__.__name__))
if self.status != Status.RUNNING:
self.initialise()
# don't set self.status yet, terminate() may need to check what the current state is first
new_status = self.update()
if new_status not in list(Status):
self.logger.error("A behaviour returned an invalid status, setting to INVALID [%s][%s]" % (new_status, self.name))
new_status = Status.INVALID
if new_status != Status.RUNNING:
self.stop(new_status)
self.status = new_status
yield self
def iterate(self, direct_descendants=False):
"""
Generator that provides iteration over this behaviour and all its children.
To traverse the entire tree:
.. code-block:: python
for node in my_behaviour.iterate():
print("Name: {0}".format(node.name))
Args:
direct_descendants (:obj:`bool`): only yield children one step away from this behaviour.
Yields:
:class:`~py_trees.behaviour.Behaviour`: one of its children
"""
for child in self.children:
if not direct_descendants:
for node in child.iterate():
yield node
else:
yield child
yield self
def stop(self, new_status=Status.INVALID):
"""
Args:
new_status (:class:`~py_trees.common.Status`): the behaviour is transitioning to this new status
This calls the user defined :meth:`~py_trees.behaviour.Behaviour.terminate` method and also resets the
generator. It will finally set the new status once the user's :meth:`~py_trees.behaviour.Behaviour.terminate`
function has been called.
.. warning:: Do not use this method, override :meth:`~py_trees.behaviour.Behaviour.terminate` instead.
"""
self.logger.debug("%s.stop(%s)" % (self.__class__.__name__, "%s->%s" % (self.status, new_status) if self.status != new_status else "%s" % new_status))
self.terminate(new_status)
self.status = new_status
self.iterator = self.tick()
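# Illustrative usage sketch: driving a custom behaviour, assuming the usual
# py_trees subclassing pattern (override update() and return a Status). The
# Counter class here is hypothetical:
#
#     class Counter(Behaviour):
#         def __init__(self, name="Counter"):
#             super(Counter, self).__init__(name)
#             self.count = 0
#
#         def update(self):
#             self.count += 1
#             return Status.SUCCESS if self.count >= 3 else Status.RUNNING
#
#     counter = Counter()
#     for node in counter.tick():     # generator form used by tree managers
#         print(node.name, node.status)
#     counter.tick_once()             # convenience wrapper for a single tick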
|
stonier/py_trees_suite
|
py_trees/behaviour.py
|
Python
|
bsd-3-clause
| 12,296
|
[
"VisIt"
] |
30d6fefa17a877ea8561b7b30cbae045977d69e07ba883484353752c697dd428
|
import os
os.environ['ETS_TOOLKIT'] = 'qt4'
os.environ['QT_API'] = 'pyqt'
import numpy as np
import sympy as sp
import networkx as nx
import itertools as it
from pyface.qt import QtGui, QtCore
from traits.api import HasTraits, Instance, on_trait_change
from traitsui.api import View, Item
from mayavi.core.ui.api import MayaviScene, MlabSceneModel, \
SceneEditor
pi = np.pi
N = 50
eps = 1/N
BLACK = (0, 0, 0)
class Visualization(HasTraits):
scene = Instance(MlabSceneModel, ())
view = View(Item('scene', editor=SceneEditor(scene_class=MayaviScene),
height=250, width=300, show_label=False),
resizable=True)
def __init__(self, surf, **traits):
super(HasTraits, self).__init__(**traits)
self.surf = surf
self.plane = surf.plane
self.f = sp.lambdify(self.surf.pvars, self.surf.parameterization,
[{'ImmutableMatrix': np.array}, "numpy"])
self.figure = self.scene.mlab.gcf()
self.figure2 = self.scene.mlab.figure(2)
self.plane_points = []
self.surf_points = []
self.j = self.surf.parameterization.jacobian(self.pvars)
self.G = (self.j.transpose()*self.j)
self.G.simplify()
self.Ginv = self.G.inv()
# self.imesh = InducedMesh(10, self.f)
def christoffel(self, k):
return sp.Matrix([[self.christoffel_ijk(0, 0, k), self.christoffel_ijk(0, 1, k)],
[self.christoffel_ijk(1, 0, k), self.christoffel_ijk(1, 1, k)]])
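# For reference, christoffel_ijk below implements the standard Christoffel
# symbols of the second kind for the induced metric G (indices i, j are
# 0-based; k is passed 1-based and shifted inside the function):
#
#     Gamma^k_ij = 1/2 * sum_l Ginv[k, l] * ( dG[j, l]/du^i
#                                           + dG[l, i]/du^j
#                                           - dG[i, j]/du^l )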
def christoffel_ijk(self, i, j, k):
k = k - 1
sum = 0
for l in range(0, 2):
sum += self.Ginv[k, l] * (sp.diff(self.G[j, l], self.surf.pvars[i]) +
sp.diff(self.G[l, i], self.surf.pvars[j]) -
sp.diff(self.G[i, j], self.surf.pvars[l]))
return sum/2
def curve(self, l):
"""Given a plane curve l, find the surface curve"""
return [self.f(i[0], i[1]) for i in l]
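# y_pk below appears to implement one relaxation step of a finite-difference
# discretisation of the geodesic equation
#     d2x^k/dt^2 + Gamma^k_ij * dx^i/dt * dx^j/dt = 0,
# updating the interior point x[p] from its neighbours x[p-1] and x[p+1]
# while the endpoints stay fixed; treat the exact scaling of the Christoffel
# term as the author's choice rather than a reference formula.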
def y_pk(self, p, k, x):
k = k - 1
if p == 0 or p == N:
return x[p][k]
sum = 0
for e in it.product(range(0, 2), range(0, 2)):
i, j = e
ch = self.christoffel_ijk(i, j, k)
sum += ch.subs(zip(self.surf.pvars, x[p])) * \
(x[p+1][i] - x[p-1][i]) * (x[p+1][j] - x[p-1][j])
sum = (x[p+1][k] + x[p-1][k])/2 + sum/4
return sum
@on_trait_change('scene.activated')
def update_plot(self):
x, y, z = self.f(self.plane.x, self.plane.y)[0]
self.scene.mlab.mesh(x, y, z, color=(0, 1, 0))
self.scene.mlab.figure(self.figure2)
self.scene.mlab.clf()
picker = self.figure2.on_mouse_pick(self.onpick)
picker.tolerance = 0.01
self.scene.mlab.surf(self.plane.x, self.plane.y, self.plane.f())
def onpick(self, event):
point = event.pick_position
added = False
if self.plane.in_plane(point) and point not in self.plane_points:
print "adding point",
self.plane_points.append(point)
self.scene.mlab.points3d(*point, scale_factor=0.1)
self.scene.mlab.text3d(*point, scale=0.2, color=BLACK,
text=str(len(self.plane_points)))
added = True
else:
print "plane already contains point",
print point
l = None
if len(self.plane_points) >= 2 and added:
start = self.plane_points[-2]
l = self.plane.line(start, point)
x = []
y = []
z = []
for i in l:
x.append(i[0])
y.append(i[1])
z.append(i[2])
self.scene.mlab.plot3d(x, y, z, tube_radius=0.01)
if added:
self.scene.mlab.figure(self.figure)
spoint = self.surf.f(point[0], point[1])[0]
self.surf_points.append(spoint)
self.scene.mlab.points3d(*spoint, scale_factor=0.1)
self.scene.mlab.text3d(*spoint, scale=0.2, color=BLACK,
text=str(len(self.surf_points)))
if l is not None:
c = self.curve(l)
x = []
y = []
z = []
for i in c:
j = i[0]
x.append(j[0])
y.append(j[1])
z.append(j[2])
self.scene.mlab.plot3d(x, y, z, tube_radius=0.01, color=BLACK)
self.scene.mlab.figure(self.figure2)
class Plane():
def __init__(self, x1, xs, y1, ys, x0=0, y0=0, z=0):
self.x0 = x0
self.x1 = x1
self.y0 = y0
self.y1 = y1
self.z = z
self.x, self.y = np.mgrid[x0:x1:xs, y0:y1:ys]
def f(self):
return np.array([[self.z]*self.x.shape[1]]*self.x.shape[0])
def in_plane(self, p):
"""Checks if the point p is contained in the plane."""
x, y, z = p
if self.x0 <= x and x <= self.x1:
if self.y0 <= y and y <= self.y1:
if self.z == z:
return True
return False
def line(self, p, q):
"""Line from p to q, ie. p + t*(q-p)"""
t = np.linspace(0, 1, N)
p_ = np.array(p)
q_ = np.array(q)
return np.array([p_ + i*(q_-p_) for i in t])
class InducedMesh():
def __init__(self, size, f):
self.size = size
self.f = f
self.x, self.y = np.mgrid[0:1:1j*self.size, 0:1:1j*self.size]
self.w = self.f(self.x, self.y)[0]
self.graph = nx.Graph()
self.graph.add_nodes_from(range(0, self.size**2))
class Sphere(Visualization):
def __init__(self, radius):
self.plane = Plane(pi, 101j, 2 * pi, 101j)
self.radius = radius
self.theta, self.phi = sp.symbols("theta phi")
self.pvars = sp.Matrix([self.theta, self.phi])
self.parameterization = sp.Matrix([[self.radius * sp.sin(self.phi) *
sp.cos(self.theta),
self.radius * sp.sin(self.phi) *
sp.sin(self.theta),
self.radius * sp.cos(self.phi)]])
super(Sphere, self).__init__(self)
# @on_trait_change('scene.activated')
# def update_plot(self):
# super(Sphere, self).update_plot()
class Torus(Visualization):
def __init__(self, R, r):
self.plane = Plane(2 * pi, 101j, 2 * pi, 101j)
self.r = r
self.R = R
self.theta, self.phi = sp.symbols("theta phi")
self.pvars = sp.Matrix([self.theta, self.phi])
self.parameterization = sp.Matrix(
[[(self.R + self.r*sp.cos(self.phi))*sp.cos(self.theta),
(self.R + self.r*sp.cos(self.phi))*sp.sin(self.theta),
self.r*sp.sin(self.phi)]])
super(Torus, self).__init__(self)
class MayaviQWidget(QtGui.QWidget):
def __init__(self, visualization, parent=None):
QtGui.QWidget.__init__(self, parent)
layout = QtGui.QVBoxLayout(self)
layout.setContentsMargins(0,0,0,0)
layout.setSpacing(0)
self.visualization = visualization
self.ui = self.visualization.edit_traits(parent=self,
kind='subpanel').control
layout.addWidget(self.ui)
self.ui.setParent(self)
if __name__ == "__main__":
app = QtGui.QApplication.instance()
container = QtGui.QWidget()
container.setWindowTitle("Embedding Mayavi in a PyQt4 Application")
layout = QtGui.QGridLayout(container)
# s = Sphere(1)
s = Torus(3, 2)
mayavi_widget = MayaviQWidget(s, container)
layout.addWidget(mayavi_widget, 1, 1)
label = QtGui.QLabel(container)
label.setText("hi")
layout.addWidget(label, 1, 2)
container.show()
window = QtGui.QMainWindow()
window.setCentralWidget(container)
window.show()
app.exec_()
else:
s = Sphere(1)
print s.christoffel(1)
|
syamajala/geodesic
|
geodesic.py
|
Python
|
gpl-2.0
| 8,162
|
[
"Mayavi"
] |
b264e1adf99fc9de9d4fc77650f423e3cf1237a04f0188d2d1e807aa8b700267
|
# -*- coding: utf-8 -*-
#
# BrodyHopfield.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Spike synchronization through subthreshold oscillation
------------------------------------------------------
This script reproduces the spike synchronization behavior
of integrate-and-fire neurons in response to a subthreshold
oscillation. This phenomenon is shown in Fig. 1 of
C.D. Brody and J.J. Hopfield
Simple Networks for Spike-Timing-Based Computation,
with Application to Olfactory Processing
Neuron 37, 843-852 (2003)
Neurons receive a weak 35 Hz oscillation, a Gaussian noise current
and an increasing DC. The time-locking capability is shown to
depend on the input current given. The result is then plotted using
pylab. All parameters are taken from the above paper.
'''
'''
First, we import all necessary modules for simulation, analysis and
plotting.
'''
import nest
import nest.raster_plot
'''
Second, the simulation parameters are assigned to variables.
'''
N = 1000 # number of neurons
bias_begin = 140. # minimal value for the bias current injection [pA]
bias_end = 200. # maximal value for the bias current injection [pA]
T = 600 # simulation time (ms)
# parameters for the alternating-current generator
driveparams = {'amplitude': 50., 'frequency': 35.}
# parameters for the noise generator
noiseparams = {'mean': 0.0, 'std': 200.}
neuronparams = {'tau_m': 20., # membrane time constant
'V_th': 20., # threshold potential
'E_L': 10., # membrane resting potential
't_ref': 2., # refractory period
'V_reset': 0., # reset potential
'C_m': 200., # membrane capacitance
'V_m': 0.} # initial membrane potential
'''
Third, the nodes are created using `Create`. We store the returned
handles in variables for later reference.
'''
neurons = nest.Create('iaf_psc_alpha', N)
sd = nest.Create('spike_detector')
noise = nest.Create('noise_generator')
drive = nest.Create('ac_generator')
'''
Set the parameters specified above for the generators using `SetStatus`.
'''
nest.SetStatus(drive, driveparams)
nest.SetStatus(noise, noiseparams)
'''
Set the parameters specified above for the neurons. Each neuron gets an
internal bias current: the first neuron receives the current with amplitude
``bias_begin``, the last neuron with amplitude ``bias_end``, and the values
in between are interpolated linearly.
'''
nest.SetStatus(neurons, neuronparams)
nest.SetStatus(neurons, [{'I_e':
(n * (bias_end - bias_begin) / N + bias_begin)}
for n in neurons])
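# With GIDs n = 1..N, the expression above ramps the bias current I_e
# approximately linearly from bias_begin to bias_end across the population.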
'''
Set the parameters for the `spike_detector`: recorded data should include
the information about global IDs of spiking neurons and the time of
individual spikes.
'''
nest.SetStatus(sd, {"withgid": True, "withtime": True})
'''
Connect the alternating-current generator and the noise generator to the
neurons, and connect the neurons to the `spike_detector`.
'''
nest.DivergentConnect(drive, neurons)
nest.DivergentConnect(noise, neurons)
nest.ConvergentConnect(neurons, sd)
'''
Simulate the network for time T.
'''
nest.Simulate(T)
'''
Plot the raster plot of the neuronal spiking activity.
'''
nest.raster_plot.from_device(sd, hist=True)
|
obreitwi/nest-simulator
|
pynest/examples/BrodyHopfield.py
|
Python
|
gpl-2.0
| 3,850
|
[
"Gaussian",
"NEURON"
] |
f953de862524d61df9531024c940d2eaa6d87ff2d7e67a34e450b4159a1c4ea9
|
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Denis A. Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import copy as cp
from distutils.version import LooseVersion
import itertools as itt
from math import log
import os
import numpy as np
from scipy import linalg
from .io.write import start_file, end_file
from .io.proj import (make_projector, _proj_equal, activate_proj,
_needs_eeg_average_ref_proj)
from .io import fiff_open
from .io.pick import (pick_types, pick_channels_cov, pick_channels, pick_info,
_picks_by_type, _pick_data_channels)
from .io.constants import FIFF
from .io.meas_info import read_bad_channels
from .io.proj import _read_proj, _write_proj
from .io.tag import find_tag
from .io.tree import dir_tree_find
from .io.write import (start_block, end_block, write_int, write_name_list,
write_double, write_float_matrix, write_string)
from .defaults import _handle_default
from .epochs import Epochs
from .event import make_fixed_length_events
from .utils import (check_fname, logger, verbose, estimate_rank,
_compute_row_norms, check_version, _time_mask, warn,
_check_copy_dep)
from .fixes import in1d
from .externals.six.moves import zip
from .externals.six import string_types
def _check_covs_algebra(cov1, cov2):
if cov1.ch_names != cov2.ch_names:
raise ValueError('Both Covariance do not have the same list of '
'channels.')
projs1 = [str(c) for c in cov1['projs']]
projs2 = [str(c) for c in cov2['projs']]
if projs1 != projs2:
raise ValueError('Both Covariance do not have the same list of '
'SSP projections.')
def _get_tslice(epochs, tmin, tmax):
"""get the slice."""
mask = _time_mask(epochs.times, tmin, tmax, sfreq=epochs.info['sfreq'])
tstart = np.where(mask)[0][0] if tmin is not None else None
tend = np.where(mask)[0][-1] + 1 if tmax is not None else None
tslice = slice(tstart, tend, None)
return tslice
class Covariance(dict):
"""Noise covariance matrix.
.. warning:: This class should not be instantiated directly, but
instead should be created using a covariance reading or
computation function.
Parameters
----------
data : array-like
The data.
names : list of str
Channel names.
bads : list of str
Bad channels.
projs : list
Projection vectors.
nfree : int
Degrees of freedom.
eig : array-like | None
Eigenvalues.
eigvec : array-like | None
Eigenvectors.
method : str | None
The method used to compute the covariance.
loglik : float
The log likelihood.
Attributes
----------
data : array of shape (n_channels, n_channels)
The covariance.
ch_names : list of string
List of channels' names.
nfree : int
Number of degrees of freedom i.e. number of time points used.
See Also
--------
compute_covariance
compute_raw_covariance
make_ad_hoc_cov
read_cov
"""
def __init__(self, data, names, bads, projs, nfree, eig=None, eigvec=None,
method=None, loglik=None):
"""Init of covariance."""
diag = True if data.ndim == 1 else False
self.update(data=data, dim=len(data), names=names, bads=bads,
nfree=nfree, eig=eig, eigvec=eigvec, diag=diag,
projs=projs, kind=FIFF.FIFFV_MNE_NOISE_COV)
if method is not None:
self['method'] = method
if loglik is not None:
self['loglik'] = loglik
@property
def data(self):
"""Numpy array of Noise covariance matrix."""
return self['data']
@property
def ch_names(self):
"""Channel names."""
return self['names']
@property
def nfree(self):
"""Number of degrees of freedom."""
return self['nfree']
def save(self, fname):
"""Save covariance matrix in a FIF file.
Parameters
----------
fname : str
Output filename.
"""
check_fname(fname, 'covariance', ('-cov.fif', '-cov.fif.gz'))
fid = start_file(fname)
try:
_write_cov(fid, self)
except Exception as inst:
fid.close()
os.remove(fname)
raise inst
end_file(fid)
def copy(self):
"""Copy the Covariance object
Returns
-------
cov : instance of Covariance
The copied object.
"""
return cp.deepcopy(self)
def as_diag(self, copy=None):
"""Set covariance to be processed as being diagonal.
Parameters
----------
copy : bool
This parameter has been deprecated and will be removed in 0.13.
Use inst.copy() instead.
Whether to return a new instance or modify in place.
Returns
-------
cov : dict
The covariance.
Notes
-----
This function allows creation of inverse operators
equivalent to using the old "--diagnoise" mne option.
"""
cov = _check_copy_dep(self, copy, default=True)
if cov['diag']:
return cov
cov['diag'] = True
cov['data'] = np.diag(cov['data'])
cov['eig'] = None
cov['eigvec'] = None
return cov
def __repr__(self):
if self.data.ndim == 2:
s = 'size : %s x %s' % self.data.shape
else: # ndim == 1
s = 'diagonal : %s' % self.data.size
s += ", n_samples : %s" % self.nfree
s += ", data : %s" % self.data
return "<Covariance | %s>" % s
def __add__(self, cov):
"""Add Covariance taking into account number of degrees of freedom."""
_check_covs_algebra(self, cov)
this_cov = cp.deepcopy(cov)
this_cov['data'] = (((this_cov['data'] * this_cov['nfree']) +
(self['data'] * self['nfree'])) /
(self['nfree'] + this_cov['nfree']))
this_cov['nfree'] += self['nfree']
this_cov['bads'] = list(set(this_cov['bads']).union(self['bads']))
return this_cov
def __iadd__(self, cov):
"""Add Covariance taking into account number of degrees of freedom."""
_check_covs_algebra(self, cov)
self['data'][:] = (((self['data'] * self['nfree']) +
(cov['data'] * cov['nfree'])) /
(self['nfree'] + cov['nfree']))
self['nfree'] += cov['nfree']
self['bads'] = list(set(self['bads']).union(cov['bads']))
return self
@verbose
def plot(self, info, exclude=[], colorbar=True, proj=False, show_svd=True,
show=True, verbose=None):
"""Plot Covariance data.
Parameters
----------
info : dict
Measurement info.
exclude : list of string | str
List of channels to exclude. If empty do not exclude any channel.
If 'bads', exclude info['bads'].
colorbar : bool
Show colorbar or not.
proj : bool
Apply projections or not.
show_svd : bool
Plot also singular values of the noise covariance for each sensor
type. We show square roots, i.e. standard deviations.
show : bool
Call pyplot.show() at the end or not.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig_cov : instance of matplotlib.pyplot.Figure
The covariance plot.
fig_svd : instance of matplotlib.pyplot.Figure | None
The SVD spectra plot of the covariance.
"""
from .viz.misc import plot_cov
return plot_cov(self, info, exclude, colorbar, proj, show_svd, show)
###############################################################################
# IO
@verbose
def read_cov(fname, verbose=None):
"""Read a noise covariance from a FIF file.
Parameters
----------
fname : string
The name of file containing the covariance matrix. It should end with
-cov.fif or -cov.fif.gz.
verbose : bool, str, int, or None (default None)
If not None, override default verbose level (see mne.verbose).
Returns
-------
cov : Covariance
The noise covariance matrix.
See Also
--------
write_cov, compute_covariance, compute_raw_covariance
"""
check_fname(fname, 'covariance', ('-cov.fif', '-cov.fif.gz'))
f, tree = fiff_open(fname)[:2]
with f as fid:
return Covariance(**_read_cov(fid, tree, FIFF.FIFFV_MNE_NOISE_COV,
limited=True))
###############################################################################
# Estimate from data
@verbose
def make_ad_hoc_cov(info, verbose=None):
"""Create an ad hoc noise covariance.
Parameters
----------
info : instance of Info
Measurement info.
verbose : bool, str, int, or None (default None)
If not None, override default verbose level (see mne.verbose).
Returns
-------
cov : instance of Covariance
The ad hoc diagonal noise covariance for the M/EEG data channels.
Notes
-----
.. versionadded:: 0.9.0
"""
info = pick_info(info, pick_types(info, meg=True, eeg=True, exclude=[]))
info._check_consistency()
# Standard deviations to be used
grad_std = 5e-13
mag_std = 20e-15
eeg_std = 0.2e-6
logger.info('Using standard noise values '
'(MEG grad : %6.1f fT/cm MEG mag : %6.1f fT EEG : %6.1f uV)'
% (1e13 * grad_std, 1e15 * mag_std, 1e6 * eeg_std))
data = np.zeros(len(info['ch_names']))
for meg, eeg, val in zip(('grad', 'mag', False), (False, False, True),
(grad_std, mag_std, eeg_std)):
data[pick_types(info, meg=meg, eeg=eeg)] = val * val
return Covariance(data, info['ch_names'], info['bads'], info['projs'],
nfree=0)
def _check_n_samples(n_samples, n_chan):
"""Check to see if there are enough samples for reliable cov calc."""
n_samples_min = 10 * (n_chan + 1) // 2
if n_samples <= 0:
raise ValueError('No samples found to compute the covariance matrix')
if n_samples < n_samples_min:
warn('Too few samples (required : %d got : %d), covariance '
'estimate may be unreliable' % (n_samples_min, n_samples))
@verbose
def compute_raw_covariance(raw, tmin=0, tmax=None, tstep=0.2, reject=None,
flat=None, picks=None, method='empirical',
method_params=None, cv=3, scalings=None, n_jobs=1,
return_estimators=False, verbose=None):
"""Estimate noise covariance matrix from a continuous segment of raw data.
It is typically useful to estimate a noise covariance from empty room
data or time intervals before starting the stimulation.
.. note:: This function will:
1. Partition the data into evenly spaced, equal-length
epochs.
2. Load them into memory.
3. Subtract the mean across all time points and epochs
for each channel.
4. Process the :class:`Epochs` by
:func:`compute_covariance`.
This will produce a slightly different result compared to
using :func:`make_fixed_length_events`, :class:`Epochs`, and
:func:`compute_covariance` directly, since that would (with
the recommended baseline correction) subtract the mean across
time *for each epoch* (instead of across epochs) for each
channel.
Parameters
----------
raw : instance of Raw
Raw data
tmin : float
Beginning of time interval in seconds. Defaults to 0.
tmax : float | None (default None)
End of time interval in seconds. If None (default), use the end of the
recording.
tstep : float (default 0.2)
Length of data chunks for artefact rejection in seconds.
Can also be None to use a single epoch of (tmax - tmin)
duration. This can use a lot of memory for large ``Raw``
instances.
reject : dict | None (default None)
Rejection parameters based on peak-to-peak amplitude.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
If reject is None then no rejection is done. Example::
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # V (EEG channels)
eog=250e-6 # V (EOG channels)
)
flat : dict | None (default None)
Rejection parameters based on flatness of signal.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
are floats that set the minimum acceptable peak-to-peak amplitude.
If flat is None then no rejection is done.
picks : array-like of int | None (default None)
Indices of channels to include (if None, data channels are used).
method : str | list | None (default 'empirical')
The method used for covariance estimation.
See :func:`mne.compute_covariance`.
.. versionadded:: 0.12
method_params : dict | None (default None)
Additional parameters to the estimation procedure.
See :func:`mne.compute_covariance`.
.. versionadded:: 0.12
cv : int | sklearn cross_validation object (default 3)
The cross validation method. Defaults to 3, which will
internally trigger a default 3-fold shuffle split.
.. versionadded:: 0.12
scalings : dict | None (default None)
Defaults to ``dict(mag=1e15, grad=1e13, eeg=1e6)``.
These defaults will scale magnetometers and gradiometers
at the same unit.
.. versionadded:: 0.12
n_jobs : int (default 1)
Number of jobs to run in parallel.
.. versionadded:: 0.12
return_estimators : bool (default False)
Whether to return all estimators or the best. Only considered if
method equals 'auto' or is a list of str. Defaults to False
.. versionadded:: 0.12
verbose : bool | str | int | None (default None)
If not None, override default verbose level (see mne.verbose).
Returns
-------
cov : instance of Covariance | list
The computed covariance. If method equals 'auto' or is a list of str
and return_estimators equals True, a list of covariance estimators is
returned (sorted by log-likelihood, from high to low, i.e. from best
to worst).
See Also
--------
compute_covariance : Estimate noise covariance matrix from epochs
"""
tmin = 0. if tmin is None else float(tmin)
tmax = raw.times[-1] if tmax is None else float(tmax)
tstep = tmax - tmin if tstep is None else float(tstep)
tstep_m1 = tstep - 1. / raw.info['sfreq'] # inclusive!
events = make_fixed_length_events(raw, 1, tmin, tmax, tstep)
pl = 's' if len(events) != 1 else ''
logger.info('Using up to %s segment%s' % (len(events), pl))
# don't exclude any bad channels, inverses expect all channels present
if picks is None:
# Need to include all channels e.g. if eog rejection is to be used
picks = np.arange(raw.info['nchan'])
pick_mask = in1d(
picks, _pick_data_channels(raw.info, with_ref_meg=False))
else:
pick_mask = slice(None)
epochs = Epochs(raw, events, 1, 0, tstep_m1, baseline=None,
picks=picks, reject=reject, flat=flat, verbose=False,
preload=False, proj=False)
if isinstance(method, string_types) and method == 'empirical':
# potentially *much* more memory efficient to do it the iterative way
picks = picks[pick_mask]
data = 0
n_samples = 0
mu = 0
# Read data in chunks
for raw_segment in epochs:
raw_segment = raw_segment[pick_mask]
mu += raw_segment.sum(axis=1)
data += np.dot(raw_segment, raw_segment.T)
n_samples += raw_segment.shape[1]
_check_n_samples(n_samples, len(picks))
mu /= n_samples
data -= n_samples * mu[:, None] * mu[None, :]
data /= (n_samples - 1.0)
logger.info("Number of samples used : %d" % n_samples)
logger.info('[done]')
ch_names = [raw.info['ch_names'][k] for k in picks]
bads = [b for b in raw.info['bads'] if b in ch_names]
projs = cp.deepcopy(raw.info['projs'])
return Covariance(data, ch_names, bads, projs, nfree=n_samples)
del picks, pick_mask
# This makes it equivalent to what we used to do (and do above for
# empirical mode), treating all epochs as if they were a single long one
epochs.load_data()
ch_means = epochs._data.mean(axis=0).mean(axis=1)
epochs._data -= ch_means[np.newaxis, :, np.newaxis]
# fake this value so there are no complaints from compute_covariance
epochs.baseline = (None, None)
return compute_covariance(epochs, keep_sample_mean=True, method=method,
method_params=method_params, cv=cv,
scalings=scalings, n_jobs=n_jobs,
return_estimators=return_estimators)
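# Illustrative usage sketch (assumes `raw` is an existing Raw instance,
# e.g. loaded from an empty-room recording):
#
#     cov = compute_raw_covariance(raw, tmin=0, tmax=None,
#                                  method='empirical')
#     cov_reg = compute_raw_covariance(raw, method='shrunk')  # needs sklearn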
@verbose
def compute_covariance(epochs, keep_sample_mean=True, tmin=None, tmax=None,
projs=None, method='empirical', method_params=None,
cv=3, scalings=None, n_jobs=1, return_estimators=False,
verbose=None):
"""Estimate noise covariance matrix from epochs.
The noise covariance is typically estimated on pre-stim periods
when the stim onset is defined from events.
If the covariance is computed for multiple event types (events
with different IDs), the following two options can be used and combined:
1. either an Epochs object for each event type is created and
a list of Epochs is passed to this function.
2. an Epochs object is created for multiple events and passed
to this function.
.. note:: Baseline correction should be used when creating the Epochs.
Otherwise the computed covariance matrix will be inaccurate.
.. note:: For multiple event types, it is also possible to create a
single Epochs object with events obtained using
merge_events(). However, the resulting covariance matrix
will only be correct if keep_sample_mean is True.
.. note:: The covariance can be unstable if the number of samples is
not sufficient. In that case it is common to regularize a
covariance estimate. The ``method`` parameter of this
function allows to regularize the covariance in an
automated way. It also allows to select between different
alternative estimation algorithms which themselves achieve
regularization. Details are described in [1]_.
Parameters
----------
epochs : instance of Epochs, or a list of Epochs objects
The epochs.
keep_sample_mean : bool (default True)
If False, the average response over epochs is computed for
each event type and subtracted during the covariance
computation. This is useful if the evoked response from a
previous stimulus extends into the baseline period of the next.
Note. This option is only implemented for method='empirical'.
tmin : float | None (default None)
Start time for baseline. If None start at first sample.
tmax : float | None (default None)
End time for baseline. If None end at last sample.
projs : list of Projection | None (default None)
List of projectors to use in covariance calculation, or None
to indicate that the projectors from the epochs should be
inherited. If None, then projectors from all epochs must match.
method : str | list | None (default 'empirical')
The method used for covariance estimation. If 'empirical' (default),
the sample covariance will be computed. A list can be passed to run a
set of the different methods.
If 'auto' or a list of methods, the best estimator will be determined
based on log-likelihood and cross-validation on unseen data as
described in [1]_. Valid methods are:
* ``'empirical'``: the empirical or sample covariance
* ``'diagonal_fixed'``: a diagonal regularization as in
mne.cov.regularize (see MNE manual)
* ``'ledoit_wolf'``: the Ledoit-Wolf estimator [2]_
* ``'shrunk'``: like 'ledoit_wolf' with cross-validation for
optimal alpha (see scikit-learn documentation on covariance
estimation)
* ``'pca'``: probabilistic PCA with low rank [3]_
* ``'factor_analysis'``: Factor Analysis with low rank [4]_
If ``'auto'``, this expands to::
['shrunk', 'diagonal_fixed', 'empirical', 'factor_analysis']
.. note:: ``'ledoit_wolf'`` and ``'pca'`` are similar to
``'shrunk'`` and ``'factor_analysis'``, respectively. They are not
included to avoid redundancy. In most cases ``'shrunk'`` and
``'factor_analysis'`` represent more appropriate default
choices.
The ``'auto'`` mode is not recommended if there are many
segments of data, since computation can take a long time.
.. versionadded:: 0.9.0
method_params : dict | None (default None)
Additional parameters to the estimation procedure. Only considered if
method is not None. Keys must correspond to the value(s) of `method`.
If None (default), expands to::
'empirical': {'store_precision': False, 'assume_centered': True},
'diagonal_fixed': {'grad': 0.01, 'mag': 0.01, 'eeg': 0.0,
'store_precision': False,
'assume_centered': True},
'ledoit_wolf': {'store_precision': False, 'assume_centered': True},
'shrunk': {'shrinkage': np.logspace(-4, 0, 30),
'store_precision': False, 'assume_centered': True},
'pca': {'iter_n_components': None},
'factor_analysis': {'iter_n_components': None}
cv : int | sklearn cross_validation object (default 3)
The cross validation method. Defaults to 3, which will
internally trigger a default 3-fold shuffle split.
scalings : dict | None (default None)
Defaults to ``dict(mag=1e15, grad=1e13, eeg=1e6)``.
These defaults will scale magnetometers and gradiometers
at the same unit.
n_jobs : int (default 1)
Number of jobs to run in parallel.
return_estimators : bool (default False)
Whether to return all estimators or the best. Only considered if
method equals 'auto' or is a list of str. Defaults to False
verbose : bool | str | int | None (default None)
If not None, override default verbose level (see mne.verbose).
Returns
-------
cov : instance of Covariance | list
The computed covariance. If method equals 'auto' or is a list of str
and return_estimators equals True, a list of covariance estimators is
returned (sorted by log-likelihood, from high to low, i.e. from best
to worst).
See Also
--------
compute_raw_covariance : Estimate noise covariance from raw data
References
----------
.. [1] Engemann D. and Gramfort A. (2015) Automated model selection in
covariance estimation and spatial whitening of MEG and EEG
signals, vol. 108, 328-342, NeuroImage.
.. [2] Ledoit, O., Wolf, M., (2004). A well-conditioned estimator for
large-dimensional covariance matrices. Journal of Multivariate
Analysis 88 (2), 365 - 411.
.. [3] Tipping, M. E., Bishop, C. M., (1999). Probabilistic principal
component analysis. Journal of the Royal Statistical Society:
Series B (Statistical Methodology) 61 (3), 611 - 622.
.. [4] Barber, D., (2012). Bayesian reasoning and machine learning.
Cambridge University Press., Algorithm 21.1
"""
accepted_methods = ('auto', 'empirical', 'diagonal_fixed', 'ledoit_wolf',
'shrunk', 'pca', 'factor_analysis',)
msg = ('Invalid method ({method}). Accepted values (individually or '
'in a list) are "%s"' % '" or "'.join(accepted_methods + ('None',)))
# scale to natural unit for best stability with MEG/EEG
if isinstance(scalings, dict):
for k, v in scalings.items():
if k not in ('mag', 'grad', 'eeg'):
raise ValueError('The keys in `scalings` must be "mag", '
'"grad" or "eeg". You gave me: %s' % k)
scalings = _handle_default('scalings', scalings)
_method_params = {
'empirical': {'store_precision': False, 'assume_centered': True},
'diagonal_fixed': {'grad': 0.01, 'mag': 0.01, 'eeg': 0.0,
'store_precision': False, 'assume_centered': True},
'ledoit_wolf': {'store_precision': False, 'assume_centered': True},
'shrunk': {'shrinkage': np.logspace(-4, 0, 30),
'store_precision': False, 'assume_centered': True},
'pca': {'iter_n_components': None},
'factor_analysis': {'iter_n_components': None}
}
if isinstance(method_params, dict):
for key, values in method_params.items():
if key not in _method_params:
raise ValueError('key (%s) must be "%s"' %
(key, '" or "'.join(_method_params)))
_method_params[key].update(method_params[key])
# for multi condition support epochs is required to refer to a list of
# epochs objects
def _unpack_epochs(epochs):
if len(epochs.event_id) > 1:
epochs = [epochs[k] for k in epochs.event_id]
else:
epochs = [epochs]
return epochs
if not isinstance(epochs, list):
epochs = _unpack_epochs(epochs)
else:
epochs = sum([_unpack_epochs(epoch) for epoch in epochs], [])
# check for baseline correction
for epochs_t in epochs:
if epochs_t.baseline is None and epochs_t.info['highpass'] < 0.5 and \
keep_sample_mean:
warn('Epochs are not baseline corrected, covariance '
'matrix may be inaccurate')
for epoch in epochs:
epoch.info._check_consistency()
bads = epochs[0].info['bads']
if projs is None:
projs = cp.deepcopy(epochs[0].info['projs'])
# make sure Epochs are compatible
for epochs_t in epochs[1:]:
if epochs_t.proj != epochs[0].proj:
raise ValueError('Epochs must agree on the use of projections')
for proj_a, proj_b in zip(epochs_t.info['projs'], projs):
if not _proj_equal(proj_a, proj_b):
raise ValueError('Epochs must have same projectors')
else:
projs = cp.deepcopy(projs)
ch_names = epochs[0].ch_names
# make sure Epochs are compatible
for epochs_t in epochs[1:]:
if epochs_t.info['bads'] != bads:
raise ValueError('Epochs must have same bad channels')
if epochs_t.ch_names != ch_names:
raise ValueError('Epochs must have same channel names')
picks_list = _picks_by_type(epochs[0].info)
picks_meeg = np.concatenate([b for _, b in picks_list])
picks_meeg = np.sort(picks_meeg)
ch_names = [epochs[0].ch_names[k] for k in picks_meeg]
info = epochs[0].info # we will overwrite 'epochs'
if method == 'auto':
method = ['shrunk', 'diagonal_fixed', 'empirical', 'factor_analysis']
if not isinstance(method, (list, tuple)):
method = [method]
ok_sklearn = check_version('sklearn', '0.15') is True
if not ok_sklearn and (len(method) != 1 or method[0] != 'empirical'):
raise ValueError('scikit-learn is not installed, `method` must be '
'`empirical`')
if keep_sample_mean is False:
if len(method) != 1 or 'empirical' not in method:
raise ValueError('`keep_sample_mean=False` is only supported '
'with `method="empirical"`')
for p, v in _method_params.items():
if v.get('assume_centered', None) is False:
raise ValueError('`assume_centered` must be True'
' if `keep_sample_mean` is False')
# prepare mean covs
n_epoch_types = len(epochs)
data_mean = [0] * n_epoch_types
n_samples = np.zeros(n_epoch_types, dtype=np.int)
n_epochs = np.zeros(n_epoch_types, dtype=np.int)
for ii, epochs_t in enumerate(epochs):
tslice = _get_tslice(epochs_t, tmin, tmax)
for e in epochs_t:
e = e[picks_meeg, tslice]
if not keep_sample_mean:
data_mean[ii] += e
n_samples[ii] += e.shape[1]
n_epochs[ii] += 1
n_samples_epoch = n_samples // n_epochs
norm_const = np.sum(n_samples_epoch * (n_epochs - 1))
data_mean = [1.0 / n_epoch * np.dot(mean, mean.T) for n_epoch, mean
in zip(n_epochs, data_mean)]
if not all(k in accepted_methods for k in method):
raise ValueError(msg.format(method=method))
info = pick_info(info, picks_meeg)
tslice = _get_tslice(epochs[0], tmin, tmax)
epochs = [ee.get_data()[:, picks_meeg, tslice] for ee in epochs]
picks_meeg = np.arange(len(picks_meeg))
picks_list = _picks_by_type(info)
if len(epochs) > 1:
epochs = np.concatenate(epochs, 0)
else:
epochs = epochs[0]
epochs = np.hstack(epochs)
n_samples_tot = epochs.shape[-1]
_check_n_samples(n_samples_tot, len(picks_meeg))
epochs = epochs.T # sklearn | C-order
if ok_sklearn:
cov_data = _compute_covariance_auto(epochs, method=method,
method_params=_method_params,
info=info,
verbose=verbose,
cv=cv,
n_jobs=n_jobs,
# XXX expose later
stop_early=True, # if needed.
picks_list=picks_list,
scalings=scalings)
else:
if _method_params['empirical']['assume_centered'] is True:
cov = epochs.T.dot(epochs) / n_samples_tot
else:
cov = np.cov(epochs.T, bias=1)
cov_data = {'empirical': {'data': cov}}
if keep_sample_mean is False:
cov = cov_data['empirical']['data']
# undo scaling
cov *= n_samples_tot
# ... apply pre-computed class-wise normalization
for mean_cov in data_mean:
cov -= mean_cov
cov /= norm_const
covs = list()
for this_method, data in cov_data.items():
cov = Covariance(data.pop('data'), ch_names, info['bads'], projs,
nfree=n_samples_tot)
logger.info('Number of samples used : %d' % n_samples_tot)
logger.info('[done]')
# add extra info
cov.update(method=this_method, **data)
covs.append(cov)
if ok_sklearn:
msg = ['log-likelihood on unseen data (descending order):']
logliks = [(c['method'], c['loglik']) for c in covs]
logliks.sort(reverse=True, key=lambda c: c[1])
for k, v in logliks:
msg.append('%s: %0.3f' % (k, v))
logger.info('\n '.join(msg))
if ok_sklearn and not return_estimators:
keys, scores = zip(*[(c['method'], c['loglik']) for c in covs])
out = covs[np.argmax(scores)]
logger.info('selecting best estimator: {0}'.format(out['method']))
elif ok_sklearn:
out = covs
out.sort(key=lambda c: c['loglik'], reverse=True)
else:
out = covs[0]
return out
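# Illustrative usage sketch (assumes `epochs` is an existing, baseline-
# corrected Epochs instance; method='auto' requires scikit-learn):
#
#     cov = compute_covariance(epochs, tmin=None, tmax=0., method='auto')
#     covs = compute_covariance(epochs, method=['shrunk', 'empirical'],
#                               return_estimators=True)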
def _compute_covariance_auto(data, method, info, method_params, cv,
scalings, n_jobs, stop_early, picks_list,
verbose):
"""docstring for _compute_covariance_auto."""
try:
from sklearn.model_selection import GridSearchCV
except Exception: # XXX support sklearn < 0.18
from sklearn.grid_search import GridSearchCV
from sklearn.covariance import (LedoitWolf, ShrunkCovariance,
EmpiricalCovariance)
# rescale to improve numerical stability
_apply_scaling_array(data.T, picks_list=picks_list, scalings=scalings)
estimator_cov_info = list()
msg = 'Estimating covariance using %s'
_RegCovariance, _ShrunkCovariance = _get_covariance_classes()
for this_method in method:
data_ = data.copy()
name = this_method.__name__ if callable(this_method) else this_method
logger.info(msg % name.upper())
if this_method == 'empirical':
est = EmpiricalCovariance(**method_params[this_method])
est.fit(data_)
_info = None
estimator_cov_info.append((est, est.covariance_, _info))
elif this_method == 'diagonal_fixed':
est = _RegCovariance(info=info, **method_params[this_method])
est.fit(data_)
_info = None
estimator_cov_info.append((est, est.covariance_, _info))
elif this_method == 'ledoit_wolf':
shrinkages = []
lw = LedoitWolf(**method_params[this_method])
for ch_type, picks in picks_list:
lw.fit(data_[:, picks])
shrinkages.append((
ch_type,
lw.shrinkage_,
picks
))
sc = _ShrunkCovariance(shrinkage=shrinkages,
**method_params[this_method])
sc.fit(data_)
_info = None
estimator_cov_info.append((sc, sc.covariance_, _info))
elif this_method == 'shrunk':
shrinkage = method_params[this_method].pop('shrinkage')
tuned_parameters = [{'shrinkage': shrinkage}]
shrinkages = []
gs = GridSearchCV(ShrunkCovariance(**method_params[this_method]),
tuned_parameters, cv=cv)
for ch_type, picks in picks_list:
gs.fit(data_[:, picks])
shrinkages.append((
ch_type,
gs.best_estimator_.shrinkage,
picks
))
shrinkages = [c[0] for c in zip(shrinkages)]
sc = _ShrunkCovariance(shrinkage=shrinkages,
**method_params[this_method])
sc.fit(data_)
_info = None
estimator_cov_info.append((sc, sc.covariance_, _info))
elif this_method == 'pca':
mp = method_params[this_method]
pca, _info = _auto_low_rank_model(data_, this_method,
n_jobs=n_jobs,
method_params=mp, cv=cv,
stop_early=stop_early)
pca.fit(data_)
estimator_cov_info.append((pca, pca.get_covariance(), _info))
elif this_method == 'factor_analysis':
mp = method_params[this_method]
fa, _info = _auto_low_rank_model(data_, this_method, n_jobs=n_jobs,
method_params=mp, cv=cv,
stop_early=stop_early)
fa.fit(data_)
estimator_cov_info.append((fa, fa.get_covariance(), _info))
else:
raise ValueError('Oh no! Your estimator does not have'
' a .fit method')
logger.info('Done.')
logger.info('Using cross-validation to select the best estimator.')
estimators, _, _ = zip(*estimator_cov_info)
logliks = np.array([_cross_val(data, e, cv, n_jobs) for e in estimators])
# undo scaling
for c in estimator_cov_info:
_undo_scaling_cov(c[1], picks_list, scalings)
out = dict()
estimators, covs, runtime_infos = zip(*estimator_cov_info)
cov_methods = [c.__name__ if callable(c) else c for c in method]
runtime_infos, covs = list(runtime_infos), list(covs)
my_zip = zip(cov_methods, runtime_infos, logliks, covs, estimators)
for this_method, runtime_info, loglik, data, est in my_zip:
out[this_method] = {'loglik': loglik, 'data': data, 'estimator': est}
if runtime_info is not None:
out[this_method].update(runtime_info)
return out
def _logdet(A):
"""Compute the log det of a symmetric matrix."""
vals = linalg.eigh(A)[0]
# avoid negative (numerical errors) or zero (semi-definite matrix) values
tol = vals.max() * vals.size * np.finfo(np.float64).eps
vals = np.where(vals > tol, vals, tol)
return np.sum(np.log(vals))
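# The scorer below evaluates, for each test sample x with precision matrix
# P = C^{-1} (the inverse of the fitted covariance, d features):
#
#     log p(x) = -0.5 * (x.T @ P @ x + d * log(2 * pi) - logdet(P))
#
# and returns the mean over samples; larger values indicate a better fit on
# held-out data.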
def _gaussian_loglik_scorer(est, X, y=None):
"""Compute the Gaussian log likelihood of X under the model in est."""
# compute empirical covariance of the test set
precision = est.get_precision()
n_samples, n_features = X.shape
log_like = np.zeros(n_samples)
log_like = -.5 * (X * (np.dot(X, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi) - _logdet(precision))
out = np.mean(log_like)
return out
def _cross_val(data, est, cv, n_jobs):
"""Helper to compute cross validation."""
try:
from sklearn.model_selection import cross_val_score
except ImportError:
# XXX support sklearn < 0.18
from sklearn.cross_validation import cross_val_score
return np.mean(cross_val_score(est, data, cv=cv, n_jobs=n_jobs,
scoring=_gaussian_loglik_scorer))
def _auto_low_rank_model(data, mode, n_jobs, method_params, cv,
stop_early=True, verbose=None):
"""compute latent variable models."""
method_params = cp.deepcopy(method_params)
iter_n_components = method_params.pop('iter_n_components')
if iter_n_components is None:
iter_n_components = np.arange(5, data.shape[1], 5)
from sklearn.decomposition import PCA, FactorAnalysis
if mode == 'factor_analysis':
est = FactorAnalysis
elif mode == 'pca':
est = PCA
else:
raise ValueError('Come on, this is not a low rank estimator: %s' %
mode)
est = est(**method_params)
est.n_components = 1
scores = np.empty_like(iter_n_components, dtype=np.float64)
scores.fill(np.nan)
# make sure we don't empty the thing if it's a generator
max_n = max(list(cp.deepcopy(iter_n_components)))
if max_n > data.shape[1]:
warn('You are trying to estimate %i components on matrix '
'with %i features.' % (max_n, data.shape[1]))
for ii, n in enumerate(iter_n_components):
est.n_components = n
try: # this may fail depending on rank and split
score = _cross_val(data=data, est=est, cv=cv, n_jobs=n_jobs)
except ValueError:
score = np.inf
if np.isinf(score) or score > 0:
logger.info('... infinite values encountered. stopping estimation')
break
logger.info('... rank: %i - loglik: %0.3f' % (n, score))
if score != -np.inf:
scores[ii] = score
if (ii >= 3 and np.all(np.diff(scores[ii - 3:ii]) < 0.) and
stop_early is True):
# early stop search when loglik has been going down 3 times
logger.info('early stopping parameter search.')
break
# happens if rank is too low right from the beginning
if np.isnan(scores).all():
raise RuntimeError('Oh no! Could not estimate covariance because all '
'scores were NaN. Please contact the MNE-Python '
'developers.')
i_score = np.nanargmax(scores)
best = est.n_components = iter_n_components[i_score]
logger.info('... best model at rank = %i' % best)
runtime_info = {'ranks': np.array(iter_n_components),
'scores': scores,
'best': best,
'cv': cv}
return est, runtime_info
def _get_covariance_classes():
"""Prepare special cov estimators."""
from sklearn.covariance import (EmpiricalCovariance, shrunk_covariance,
ShrunkCovariance)
class _RegCovariance(EmpiricalCovariance):
"""Aux class."""
def __init__(self, info, grad=0.01, mag=0.01, eeg=0.0,
store_precision=False, assume_centered=False):
self.info = info
self.grad = grad
self.mag = mag
self.eeg = eeg
self.store_precision = store_precision
self.assume_centered = assume_centered
def fit(self, X):
EmpiricalCovariance.fit(self, X)
self.covariance_ = 0.5 * (self.covariance_ + self.covariance_.T)
cov_ = Covariance(
data=self.covariance_, names=self.info['ch_names'],
bads=self.info['bads'], projs=self.info['projs'],
nfree=len(self.covariance_))
cov_ = regularize(cov_, self.info, grad=self.grad, mag=self.mag,
eeg=self.eeg, proj=False,
exclude='bads') # ~proj == important!!
self.covariance_ = cov_.data
return self
class _ShrunkCovariance(ShrunkCovariance):
"""Aux class."""
def __init__(self, store_precision, assume_centered, shrinkage=0.1):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.shrinkage = shrinkage
def fit(self, X):
EmpiricalCovariance.fit(self, X)
cov = self.covariance_
if not isinstance(self.shrinkage, (list, tuple)):
shrinkage = [('all', self.shrinkage, np.arange(len(cov)))]
else:
shrinkage = self.shrinkage
zero_cross_cov = np.zeros_like(cov, dtype=bool)
for a, b in itt.combinations(shrinkage, 2):
picks_i, picks_j = a[2], b[2]
ch_ = a[0], b[0]
if 'eeg' in ch_:
zero_cross_cov[np.ix_(picks_i, picks_j)] = True
zero_cross_cov[np.ix_(picks_j, picks_i)] = True
self.zero_cross_cov_ = zero_cross_cov
# Apply shrinkage to blocks
for ch_type, c, picks in shrinkage:
sub_cov = cov[np.ix_(picks, picks)]
cov[np.ix_(picks, picks)] = shrunk_covariance(sub_cov,
shrinkage=c)
# Apply shrinkage to cross-cov
for a, b in itt.combinations(shrinkage, 2):
shrinkage_i, shrinkage_j = a[1], b[1]
picks_i, picks_j = a[2], b[2]
c_ij = np.sqrt((1. - shrinkage_i) * (1. - shrinkage_j))
cov[np.ix_(picks_i, picks_j)] *= c_ij
cov[np.ix_(picks_j, picks_i)] *= c_ij
# Set to zero the necessary cross-cov
if np.any(zero_cross_cov):
cov[zero_cross_cov] = 0.0
self.covariance_ = cov
return self
def score(self, X_test, y=None):
"""Compute the log-likelihood of a Gaussian data set with
`self.covariance_` as an estimator of its covariance matrix.
Parameters
----------
X_test : array-like, shape = [n_samples, n_features]
Test data of which we compute the likelihood, where n_samples
is the number of samples and n_features is the number of
features. X_test is assumed to be drawn from the same
distribution as the data used in fit (including centering).
y : not used, present for API consistency purposes.
Returns
-------
res : float
The likelihood of the data set with `self.covariance_` as an
estimator of its covariance matrix.
"""
from sklearn.covariance import empirical_covariance, log_likelihood
# compute empirical covariance of the test set
test_cov = empirical_covariance(X_test - self.location_,
assume_centered=True)
if np.any(self.zero_cross_cov_):
test_cov[self.zero_cross_cov_] = 0.
res = log_likelihood(test_cov, self.get_precision())
return res
return _RegCovariance, _ShrunkCovariance
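# Design note: _ShrunkCovariance above applies a separate shrinkage value per
# channel type, attenuates the cross-type blocks by sqrt((1 - s_i)*(1 - s_j)),
# and zeroes cross blocks that involve EEG, so sensor types with very
# different units and noise levels are regularized independently.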
###############################################################################
# Writing
def write_cov(fname, cov):
"""Write a noise covariance matrix.
Parameters
----------
fname : string
The name of the file. It should end with -cov.fif or -cov.fif.gz.
cov : Covariance
The noise covariance matrix
See Also
--------
read_cov
"""
cov.save(fname)
###############################################################################
# Prepare for inverse modeling
def _unpack_epochs(epochs):
"""Aux Function."""
if len(epochs.event_id) > 1:
epochs = [epochs[k] for k in epochs.event_id]
else:
epochs = [epochs]
return epochs
def _get_ch_whitener(A, pca, ch_type, rank):
""""Get whitener params for a set of channels."""
# whitening operator
eig, eigvec = linalg.eigh(A, overwrite_a=True)
eigvec = eigvec.T
eig[:-rank] = 0.0
logger.info('Setting small %s eigenvalues to zero.' % ch_type)
if not pca: # No PCA case.
logger.info('Not doing PCA for %s.' % ch_type)
else:
logger.info('Doing PCA for %s.' % ch_type)
# This line will reduce the actual number of variables in data
# and leadfield to the true rank.
eigvec = eigvec[:-rank].copy()
return eig, eigvec
@verbose
def prepare_noise_cov(noise_cov, info, ch_names, rank=None,
scalings=None, verbose=None):
"""Prepare noise covariance matrix.
Parameters
----------
noise_cov : Covariance
The noise covariance to process.
info : dict
The measurement info (used to get channel types and bad channels).
ch_names : list
The channel names to be considered.
rank : None | int | dict
Specified rank of the noise covariance matrix. If None, the rank is
detected automatically. If int, the rank is specified for the MEG
channels. A dictionary with entries 'eeg' and/or 'meg' can be used
to specify the rank for each modality.
scalings : dict | None
Data will be rescaled before rank estimation to improve accuracy.
If dict, it will override the following dict (default if None):
dict(mag=1e12, grad=1e11, eeg=1e5)
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
"""
C_ch_idx = [noise_cov.ch_names.index(c) for c in ch_names]
if noise_cov['diag'] is False:
C = noise_cov.data[np.ix_(C_ch_idx, C_ch_idx)]
else:
C = np.diag(noise_cov.data[C_ch_idx])
scalings = _handle_default('scalings_cov_rank', scalings)
# Create the projection operator
proj, ncomp, _ = make_projector(info['projs'], ch_names)
if ncomp > 0:
logger.info(' Created an SSP operator (subspace dimension = %d)'
% ncomp)
C = np.dot(proj, np.dot(C, proj.T))
pick_meg = pick_types(info, meg=True, eeg=False, ref_meg=False,
exclude='bads')
pick_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude='bads')
meg_names = [info['chs'][k]['ch_name'] for k in pick_meg]
C_meg_idx = [k for k in range(len(C)) if ch_names[k] in meg_names]
eeg_names = [info['chs'][k]['ch_name'] for k in pick_eeg]
C_eeg_idx = [k for k in range(len(C)) if ch_names[k] in eeg_names]
has_meg = len(C_meg_idx) > 0
has_eeg = len(C_eeg_idx) > 0
# Get the specified noise covariance rank
if rank is not None:
if isinstance(rank, dict):
rank_meg = rank.get('meg', None)
rank_eeg = rank.get('eeg', None)
else:
rank_meg = int(rank)
rank_eeg = None
else:
rank_meg, rank_eeg = None, None
if has_meg:
C_meg = C[np.ix_(C_meg_idx, C_meg_idx)]
this_info = pick_info(info, pick_meg)
if rank_meg is None:
if len(C_meg_idx) < len(pick_meg):
this_info = pick_info(info, C_meg_idx)
rank_meg = _estimate_rank_meeg_cov(C_meg, this_info, scalings)
C_meg_eig, C_meg_eigvec = _get_ch_whitener(C_meg, False, 'MEG',
rank_meg)
if has_eeg:
C_eeg = C[np.ix_(C_eeg_idx, C_eeg_idx)]
this_info = pick_info(info, pick_eeg)
if rank_eeg is None:
if len(C_meg_idx) < len(pick_meg):
this_info = pick_info(info, C_eeg_idx)
rank_eeg = _estimate_rank_meeg_cov(C_eeg, this_info, scalings)
C_eeg_eig, C_eeg_eigvec = _get_ch_whitener(C_eeg, False, 'EEG',
rank_eeg)
if _needs_eeg_average_ref_proj(info):
warn('No average EEG reference present in info["projs"], covariance '
'may be adversely affected. Consider recomputing covariance using'
' a raw file with an average eeg reference projector added.')
n_chan = len(ch_names)
eigvec = np.zeros((n_chan, n_chan), dtype=np.float)
eig = np.zeros(n_chan, dtype=np.float)
if has_meg:
eigvec[np.ix_(C_meg_idx, C_meg_idx)] = C_meg_eigvec
eig[C_meg_idx] = C_meg_eig
if has_eeg:
eigvec[np.ix_(C_eeg_idx, C_eeg_idx)] = C_eeg_eigvec
eig[C_eeg_idx] = C_eeg_eig
assert(len(C_meg_idx) + len(C_eeg_idx) == n_chan)
noise_cov = cp.deepcopy(noise_cov)
noise_cov.update(data=C, eig=eig, eigvec=eigvec, dim=len(ch_names),
diag=False, names=ch_names)
return noise_cov
def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads',
proj=True, verbose=None):
"""Regularize noise covariance matrix.
This method works by adding a constant to the diagonal for each
channel type separately. Special care is taken to keep the
rank of the data constant.
**Note:** This function is kept for reasons of backward-compatibility.
Please consider explicitly using the ``method`` parameter in
`compute_covariance` to directly combine estimation with regularization
in a data-driven fashion; see the
`faq <http://martinos.org/mne/dev/faq.html#how-should-i-regularize-the-covariance-matrix>`_
for more information.
Parameters
----------
cov : Covariance
The noise covariance matrix.
info : dict
The measurement info (used to get channel types and bad channels).
mag : float (default 0.1)
Regularization factor for MEG magnetometers.
grad : float (default 0.1)
Regularization factor for MEG gradiometers.
eeg : float (default 0.1)
Regularization factor for EEG.
exclude : list | 'bads' (default 'bads')
List of channels to mark as bad. If 'bads', bad channels
are extracted from both info['bads'] and cov['bads'].
proj : bool (default True)
Whether to apply projections to keep the rank of the data.
verbose : bool | str | int | None (default None)
If not None, override default verbose level (see mne.verbose).
Returns
-------
reg_cov : Covariance
The regularized covariance matrix.
See Also
--------
compute_covariance
""" # noqa
cov = cp.deepcopy(cov)
info._check_consistency()
if exclude is None:
raise ValueError('exclude must be a list of strings or "bads"')
if exclude == 'bads':
exclude = info['bads'] + cov['bads']
sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude=exclude)
sel_mag = pick_types(info, meg='mag', eeg=False, ref_meg=False,
exclude=exclude)
sel_grad = pick_types(info, meg='grad', eeg=False, ref_meg=False,
exclude=exclude)
info_ch_names = info['ch_names']
ch_names_eeg = [info_ch_names[i] for i in sel_eeg]
ch_names_mag = [info_ch_names[i] for i in sel_mag]
ch_names_grad = [info_ch_names[i] for i in sel_grad]
# This actually removes bad channels from the cov, which is not backward
# compatible, so let's leave all channels in
cov_good = pick_channels_cov(cov, include=info_ch_names, exclude=exclude)
ch_names = cov_good.ch_names
idx_eeg, idx_mag, idx_grad = [], [], []
for i, ch in enumerate(ch_names):
if ch in ch_names_eeg:
idx_eeg.append(i)
elif ch in ch_names_mag:
idx_mag.append(i)
elif ch in ch_names_grad:
idx_grad.append(i)
else:
raise Exception('channel is unknown type')
C = cov_good['data']
assert len(C) == (len(idx_eeg) + len(idx_mag) + len(idx_grad))
if proj:
projs = info['projs'] + cov_good['projs']
projs = activate_proj(projs)
for desc, idx, reg in [('EEG', idx_eeg, eeg), ('MAG', idx_mag, mag),
('GRAD', idx_grad, grad)]:
if len(idx) == 0 or reg == 0.0:
logger.info(" %s regularization : None" % desc)
continue
logger.info(" %s regularization : %s" % (desc, reg))
this_C = C[np.ix_(idx, idx)]
if proj:
this_ch_names = [ch_names[k] for k in idx]
P, ncomp, _ = make_projector(projs, this_ch_names)
U = linalg.svd(P)[0][:, :-ncomp]
if ncomp > 0:
logger.info(' Created an SSP operator for %s '
'(dimension = %d)' % (desc, ncomp))
this_C = np.dot(U.T, np.dot(this_C, U))
sigma = np.mean(np.diag(this_C))
this_C.flat[::len(this_C) + 1] += reg * sigma # modify diag inplace
if proj and ncomp > 0:
this_C = np.dot(U, np.dot(this_C, U.T))
C[np.ix_(idx, idx)] = this_C
# Put data back in correct locations
idx = pick_channels(cov.ch_names, info_ch_names, exclude=exclude)
cov['data'][np.ix_(idx, idx)] = C
return cov
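# In effect, for each channel type the loop above adds a scaled identity to
# the corresponding diagonal block (a sketch):
#
#     C_block += reg * mean(diag(C_block)) * I
#
# optionally working inside the SSP subspace (U.T @ C_block @ U) so that
# projected-out dimensions are not re-inflated.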
def _regularized_covariance(data, reg=None):
"""Compute a regularized covariance from data using sklearn.
Parameters
----------
data : ndarray, shape (n_channels, n_times)
Data for covariance estimation.
reg : float | str | None (default None)
If not None, allow regularization for covariance estimation
if float, shrinkage covariance is used (0 <= shrinkage <= 1).
if str, optimal shrinkage using Ledoit-Wolf Shrinkage ('ledoit_wolf')
or Oracle Approximating Shrinkage ('oas').
Returns
-------
cov : ndarray, shape (n_channels, n_channels)
The covariance matrix.
"""
if reg is None:
# compute empirical covariance
cov = np.cov(data)
else:
no_sklearn_err = ('the scikit-learn package is missing and '
'required for covariance regularization.')
# use sklearn covariance estimators
if isinstance(reg, float):
if (reg < 0) or (reg > 1):
raise ValueError('0 <= shrinkage <= 1 for '
'covariance regularization.')
try:
import sklearn
sklearn_version = LooseVersion(sklearn.__version__)
from sklearn.covariance import ShrunkCovariance
except ImportError:
raise Exception(no_sklearn_err)
if sklearn_version < '0.12':
skl_cov = ShrunkCovariance(shrinkage=reg,
store_precision=False)
else:
# init sklearn.covariance.ShrunkCovariance estimator
skl_cov = ShrunkCovariance(shrinkage=reg,
store_precision=False,
assume_centered=True)
elif isinstance(reg, string_types):
if reg == 'ledoit_wolf':
try:
from sklearn.covariance import LedoitWolf
except ImportError:
raise Exception(no_sklearn_err)
# init sklearn.covariance.LedoitWolf estimator
skl_cov = LedoitWolf(store_precision=False,
assume_centered=True)
elif reg == 'oas':
try:
from sklearn.covariance import OAS
except ImportError:
raise Exception(no_sklearn_err)
# init sklearn.covariance.OAS estimator
skl_cov = OAS(store_precision=False,
assume_centered=True)
else:
raise ValueError("regularization parameter should be "
"'ledoit_wolf' or 'oas'")
else:
raise ValueError("regularization parameter should be "
"of type str or int (got %s)." % type(reg))
# compute regularized covariance using sklearn
cov = skl_cov.fit(data.T).covariance_
return cov
@verbose
def compute_whitener(noise_cov, info, picks=None, rank=None,
scalings=None, verbose=None):
"""Compute whitening matrix.
Parameters
----------
noise_cov : Covariance
The noise covariance.
info : dict
The measurement info.
picks : array-like of int | None
The channels indices to include. If None the data
channels in info, except bad channels, are used.
rank : None | int | dict
Specified rank of the noise covariance matrix. If None, the rank is
detected automatically. If int, the rank is specified for the MEG
channels. A dictionary with entries 'eeg' and/or 'meg' can be used
to specify the rank for each modality.
scalings : dict | None
The rescaling method to be applied. See documentation of
``prepare_noise_cov`` for details.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
W : 2d array
The whitening matrix.
ch_names : list
The channel names.
"""
if picks is None:
picks = pick_types(info, meg=True, eeg=True, ref_meg=False,
exclude='bads')
ch_names = [info['chs'][k]['ch_name'] for k in picks]
noise_cov = cp.deepcopy(noise_cov)
noise_cov = prepare_noise_cov(noise_cov, info, ch_names,
rank=rank, scalings=scalings)
n_chan = len(ch_names)
W = np.zeros((n_chan, n_chan), dtype=np.float)
#
# Omit the zeroes due to projection
#
eig = noise_cov['eig']
nzero = (eig > 0)
W[nzero, nzero] = 1.0 / np.sqrt(eig[nzero])
#
# Rows of eigvec are the eigenvectors
#
W = np.dot(W, noise_cov['eigvec'])
W = np.dot(noise_cov['eigvec'].T, W)
return W, ch_names
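# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): applying the
# whitening matrix returned above to a data array.  ``noise_cov``, ``info``
# and ``data`` are assumed to exist already, with the rows of ``data``
# ordered like the returned ``ch_names``.
def _example_apply_whitener(noise_cov, info, data):  # illustrative only
    W, ch_names = compute_whitener(noise_cov, info)
    whitened = np.dot(W, data)   # whitened data should have ~unit variance
    return whitened, ch_names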
@verbose
def whiten_evoked(evoked, noise_cov, picks=None, diag=False, rank=None,
scalings=None, verbose=None):
"""Whiten evoked data using given noise covariance.
Parameters
----------
evoked : instance of Evoked
The evoked data
noise_cov : instance of Covariance
The noise covariance
picks : array-like of int | None
The channel indices to whiten. Can be None to whiten MEG and EEG
data.
diag : bool (default False)
If True, whiten using only the diagonal of the covariance.
rank : None | int | dict (default None)
Specified rank of the noise covariance matrix. If None, the rank is
detected automatically. If int, the rank is specified for the MEG
channels. A dictionary with entries 'eeg' and/or 'meg' can be used
to specify the rank for each modality.
scalings : dict | None (default None)
To achieve reliable rank estimation on multiple sensors,
sensors have to be rescaled. This parameter controls the
rescaling. If dict, it will override the
following default dict (default if None):
dict(mag=1e12, grad=1e11, eeg=1e5)
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
evoked_white : instance of Evoked
The whitened evoked data.
"""
evoked = cp.deepcopy(evoked)
if picks is None:
picks = pick_types(evoked.info, meg=True, eeg=True)
W = _get_whitener_data(evoked.info, noise_cov, picks,
diag=diag, rank=rank, scalings=scalings)
evoked.data[picks] = np.sqrt(evoked.nave) * np.dot(W, evoked.data[picks])
return evoked
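# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): full versus
# diagonal-only whitening of an Evoked instance.  ``evoked`` and
# ``noise_cov`` are assumed to exist and share channels.
def _example_whiten_evoked(evoked, noise_cov):  # illustrative only
    evoked_full = whiten_evoked(evoked, noise_cov, diag=False)  # full covariance
    evoked_diag = whiten_evoked(evoked, noise_cov, diag=True)   # variances only
    return evoked_full, evoked_diag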
@verbose
def _get_whitener_data(info, noise_cov, picks, diag=False, rank=None,
scalings=None, verbose=None):
"""Get whitening matrix for a set of data."""
ch_names = [info['ch_names'][k] for k in picks]
noise_cov = pick_channels_cov(noise_cov, include=ch_names, exclude=[])
info = pick_info(info, picks)
if diag:
noise_cov = cp.deepcopy(noise_cov)
noise_cov['data'] = np.diag(np.diag(noise_cov['data']))
scalings = _handle_default('scalings_cov_rank', scalings)
W = compute_whitener(noise_cov, info, rank=rank, scalings=scalings)[0]
return W
@verbose
def _read_cov(fid, node, cov_kind, limited=False, verbose=None):
"""Read a noise covariance matrix."""
# Find all covariance matrices
covs = dir_tree_find(node, FIFF.FIFFB_MNE_COV)
if len(covs) == 0:
raise ValueError('No covariance matrices found')
# Is any of the covariance matrices a noise covariance
for p in range(len(covs)):
tag = find_tag(fid, covs[p], FIFF.FIFF_MNE_COV_KIND)
if tag is not None and int(tag.data) == cov_kind:
this = covs[p]
# Find all the necessary data
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIM)
if tag is None:
raise ValueError('Covariance matrix dimension not found')
dim = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_NFREE)
if tag is None:
nfree = -1
else:
nfree = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_METHOD)
if tag is None:
method = None
else:
method = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_SCORE)
if tag is None:
score = None
else:
score = tag.data[0]
tag = find_tag(fid, this, FIFF.FIFF_MNE_ROW_NAMES)
if tag is None:
names = []
else:
names = tag.data.split(':')
if len(names) != dim:
raise ValueError('Number of names does not match '
'covariance matrix dimension')
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV)
if tag is None:
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIAG)
if tag is None:
raise ValueError('No covariance matrix data found')
else:
# Diagonal is stored
data = tag.data
diag = True
logger.info(' %d x %d diagonal covariance (kind = '
'%d) found.' % (dim, dim, cov_kind))
else:
from scipy import sparse
if not sparse.issparse(tag.data):
# Lower diagonal is stored
vals = tag.data
data = np.zeros((dim, dim))
data[np.tril(np.ones((dim, dim))) > 0] = vals
data = data + data.T
data.flat[::dim + 1] /= 2.0
diag = False
logger.info(' %d x %d full covariance (kind = %d) '
'found.' % (dim, dim, cov_kind))
else:
diag = False
data = tag.data
logger.info(' %d x %d sparse covariance (kind = %d)'
' found.' % (dim, dim, cov_kind))
# Read the possibly precomputed decomposition
tag1 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVALUES)
tag2 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVECTORS)
if tag1 is not None and tag2 is not None:
eig = tag1.data
eigvec = tag2.data
else:
eig = None
eigvec = None
# Read the projection operator
projs = _read_proj(fid, this)
# Read the bad channel list
bads = read_bad_channels(fid, this)
# Put it together
assert dim == len(data)
assert data.ndim == (1 if diag else 2)
cov = dict(kind=cov_kind, diag=diag, dim=dim, names=names,
data=data, projs=projs, bads=bads, nfree=nfree, eig=eig,
eigvec=eigvec)
if score is not None:
cov['loglik'] = score
if method is not None:
cov['method'] = method
if limited:
del cov['kind'], cov['dim'], cov['diag']
return cov
logger.info(' Did not find the desired covariance matrix (kind = %d)'
% cov_kind)
return None
def _write_cov(fid, cov):
"""Write a noise covariance matrix."""
start_block(fid, FIFF.FIFFB_MNE_COV)
# Dimensions etc.
write_int(fid, FIFF.FIFF_MNE_COV_KIND, cov['kind'])
write_int(fid, FIFF.FIFF_MNE_COV_DIM, cov['dim'])
if cov['nfree'] > 0:
write_int(fid, FIFF.FIFF_MNE_COV_NFREE, cov['nfree'])
# Channel names
if cov['names'] is not None and len(cov['names']) > 0:
write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, cov['names'])
# Data
if cov['diag']:
write_double(fid, FIFF.FIFF_MNE_COV_DIAG, cov['data'])
else:
# Store only lower part of covariance matrix
dim = cov['dim']
mask = np.tril(np.ones((dim, dim), dtype=np.bool)) > 0
vals = cov['data'][mask].ravel()
write_double(fid, FIFF.FIFF_MNE_COV, vals)
# Eigenvalues and vectors if present
if cov['eig'] is not None and cov['eigvec'] is not None:
write_float_matrix(fid, FIFF.FIFF_MNE_COV_EIGENVECTORS, cov['eigvec'])
write_double(fid, FIFF.FIFF_MNE_COV_EIGENVALUES, cov['eig'])
# Projection operator
if cov['projs'] is not None and len(cov['projs']) > 0:
_write_proj(fid, cov['projs'])
# Bad channels
if cov['bads'] is not None and len(cov['bads']) > 0:
start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, cov['bads'])
end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
# estimator method
if 'method' in cov:
write_string(fid, FIFF.FIFF_MNE_COV_METHOD, cov['method'])
# negative log-likelihood score
if 'loglik' in cov:
write_double(
fid, FIFF.FIFF_MNE_COV_SCORE, np.array(cov['loglik']))
# Done!
end_block(fid, FIFF.FIFFB_MNE_COV)
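# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the lower-triangle
# packing written by _write_cov and the symmetric reconstruction performed
# by _read_cov, demonstrated on a small matrix with plain NumPy.
def _example_triangular_round_trip():  # illustrative only
    dim = 3
    a = np.arange(dim * dim, dtype=float).reshape(dim, dim)
    full = a + a.T                                    # symmetric test matrix
    mask = np.tril(np.ones((dim, dim), dtype=bool)) > 0
    vals = full[mask].ravel()                         # what _write_cov stores
    data = np.zeros((dim, dim))
    data[np.tril(np.ones((dim, dim))) > 0] = vals     # what _read_cov restores
    data = data + data.T
    data.flat[::dim + 1] /= 2.0                       # diagonal was added twice
    assert np.allclose(data, full)
    return data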
def _apply_scaling_array(data, picks_list, scalings):
"""Scale data type-dependently for estimation."""
scalings = _check_scaling_inputs(data, picks_list, scalings)
if isinstance(scalings, dict):
picks_dict = dict(picks_list)
scalings = [(picks_dict[k], v) for k, v in scalings.items()
if k in picks_dict]
for idx, scaling in scalings:
data[idx, :] *= scaling # F - order
else:
data *= scalings[:, np.newaxis] # F - order
def _undo_scaling_array(data, picks_list, scalings):
scalings = _check_scaling_inputs(data, picks_list, scalings)
if isinstance(scalings, dict):
scalings = dict((k, 1. / v) for k, v in scalings.items())
elif isinstance(scalings, np.ndarray):
scalings = 1. / scalings
return _apply_scaling_array(data, picks_list, scalings)
def _apply_scaling_cov(data, picks_list, scalings):
"""Scale resulting data after estimation."""
scalings = _check_scaling_inputs(data, picks_list, scalings)
scales = None
if isinstance(scalings, dict):
n_channels = len(data)
covinds = list(zip(*picks_list))[1]
assert len(data) == sum(len(k) for k in covinds)
assert list(sorted(np.concatenate(covinds))) == list(range(len(data)))
scales = np.zeros(n_channels)
for ch_t, idx in picks_list:
scales[idx] = scalings[ch_t]
elif isinstance(scalings, np.ndarray):
if len(scalings) != len(data):
raise ValueError('Scaling factors and data are of incompatible '
'shape')
scales = scalings
elif scalings is None:
pass
else:
        raise RuntimeError('scalings must be a dict, ndarray, or None; '
                           'got %s' % type(scalings))
if scales is not None:
assert np.sum(scales == 0.) == 0
data *= (scales[None, :] * scales[:, None])
def _undo_scaling_cov(data, picks_list, scalings):
scalings = _check_scaling_inputs(data, picks_list, scalings)
if isinstance(scalings, dict):
scalings = dict((k, 1. / v) for k, v in scalings.items())
elif isinstance(scalings, np.ndarray):
scalings = 1. / scalings
return _apply_scaling_cov(data, picks_list, scalings)
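# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the covariance
# scaling helpers modify ``data`` in place, and applying then undoing a
# dict-based scaling is numerically a no-op.  The picks_list layout below is
# hypothetical: two channel types covering rows 0-1 and 2-3 of a 4 x 4 matrix.
def _example_scaling_round_trip():  # illustrative only
    rng = np.random.RandomState(0)
    data = rng.randn(4, 4)
    data = np.dot(data, data.T)                       # symmetric test covariance
    original = data.copy()
    picks_list = [('mag', np.array([0, 1])), ('eeg', np.array([2, 3]))]
    scalings = dict(mag=1e15, eeg=1e6)
    _apply_scaling_cov(data, picks_list, scalings)
    _undo_scaling_cov(data, picks_list, scalings)
    assert np.allclose(data, original)
    return data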
def _check_scaling_inputs(data, picks_list, scalings):
"""Aux function."""
rescale_dict_ = dict(mag=1e15, grad=1e13, eeg=1e6)
scalings_ = None
if isinstance(scalings, string_types) and scalings == 'norm':
scalings_ = 1. / _compute_row_norms(data)
elif isinstance(scalings, dict):
rescale_dict_.update(scalings)
scalings_ = rescale_dict_
elif isinstance(scalings, np.ndarray):
scalings_ = scalings
elif scalings is None:
pass
else:
raise NotImplementedError("No way! That's not a rescaling "
'option: %s' % scalings)
return scalings_
def _estimate_rank_meeg_signals(data, info, scalings, tol='auto',
return_singular=False):
"""Estimate rank for M/EEG data.
Parameters
----------
data : np.ndarray of float, shape(n_channels, n_samples)
The M/EEG signals.
info : Info
        The measurement info.
scalings : dict | 'norm' | np.ndarray | None
The rescaling method to be applied. If dict, it will override the
following default dict:
dict(mag=1e15, grad=1e13, eeg=1e6)
If 'norm' data will be scaled by channel-wise norms. If array,
pre-specified norms will be used. If None, no scaling will be applied.
tol : float | str
Tolerance. See ``estimate_rank``.
return_singular : bool
If True, also return the singular values that were used
to determine the rank.
Returns
-------
rank : int
Estimated rank of the data.
s : array
If return_singular is True, the singular values that were
thresholded to determine the rank are also returned.
"""
picks_list = _picks_by_type(info)
_apply_scaling_array(data, picks_list, scalings)
if data.shape[1] < data.shape[0]:
        logger.warning("You've got fewer samples than channels, your "
                       "rank estimate might be inaccurate.")
out = estimate_rank(data, tol=tol, norm=False,
return_singular=return_singular)
rank = out[0] if isinstance(out, tuple) else out
ch_type = ' + '.join(list(zip(*picks_list))[0])
logger.info('estimated rank (%s): %d' % (ch_type, rank))
_undo_scaling_array(data, picks_list, scalings)
return out
def _estimate_rank_meeg_cov(data, info, scalings, tol='auto',
return_singular=False):
"""Estimate rank for M/EEG data.
Parameters
----------
data : np.ndarray of float, shape (n_channels, n_channels)
The M/EEG covariance.
info : Info
        The measurement info.
scalings : dict | 'norm' | np.ndarray | None
The rescaling method to be applied. If dict, it will override the
following default dict:
dict(mag=1e12, grad=1e11, eeg=1e5)
If 'norm' data will be scaled by channel-wise norms. If array,
pre-specified norms will be used. If None, no scaling will be applied.
tol : float | str
Tolerance. See ``estimate_rank``.
return_singular : bool
If True, also return the singular values that were used
to determine the rank.
Returns
-------
rank : int
Estimated rank of the data.
s : array
If return_singular is True, the singular values that were
thresholded to determine the rank are also returned.
"""
picks_list = _picks_by_type(info)
scalings = _handle_default('scalings_cov_rank', scalings)
_apply_scaling_cov(data, picks_list, scalings)
if data.shape[1] < data.shape[0]:
        logger.warning("You've got fewer samples than channels, your "
                       "rank estimate might be inaccurate.")
out = estimate_rank(data, tol=tol, norm=False,
return_singular=return_singular)
rank = out[0] if isinstance(out, tuple) else out
ch_type = ' + '.join(list(zip(*picks_list))[0])
logger.info('estimated rank (%s): %d' % (ch_type, rank))
_undo_scaling_cov(data, picks_list, scalings)
return out
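# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): why rank estimation
# matters.  A covariance built from fewer sources than channels is rank
# deficient, and ``estimate_rank`` (used by the two helpers above) recovers
# the true rank.  The numbers below are arbitrary.
def _example_rank_deficient_cov():  # illustrative only
    rng = np.random.RandomState(0)
    n_channels, n_sources = 10, 4
    mixing = rng.randn(n_channels, n_sources)
    sources = rng.randn(n_sources, 1000)
    cov = np.cov(np.dot(mixing, sources))              # rank is at most 4
    return estimate_rank(cov, tol='auto', norm=False)  # expected: 4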
| wronk/mne-python | mne/cov.py | Python | bsd-3-clause | 75,382 | ["Gaussian"] | 1507b7d8617241dc15af347e89efaf31c064f3b4f21822a1f704bf4a39d5ca52 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import re
import logging
import os
import numpy as np
import math
from monty.io import zopen
from monty.json import jsanitize
from monty.json import MSONable
from pymatgen.core import Molecule
from pymatgen.analysis.graphs import MoleculeGraph
from pymatgen.analysis.local_env import OpenBabelNN
import networkx as nx
try:
import openbabel as ob
have_babel = True
except ImportError:
ob = None
have_babel = False
from .utils import read_table_pattern, read_pattern, process_parsed_coords
__author__ = "Samuel Blau, Brandon Wood, Shyam Dwaraknath"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "0.1"
logger = logging.getLogger(__name__)
class QCOutput(MSONable):
"""
Class to parse QChem output files.
"""
def __init__(self, filename):
"""
Args:
filename (str): Filename to parse
"""
self.filename = filename
self.data = {}
self.data["errors"] = []
self.text = ""
with zopen(filename, 'rt') as f:
self.text = f.read()
# Check if output file contains multiple output files. If so, print an error message and exit
self.data["multiple_outputs"] = read_pattern(
self.text, {
"key": r"Job\s+\d+\s+of\s+(\d+)\s+"
},
terminate_on_match=True).get('key')
if not (self.data.get('multiple_outputs') == None
or self.data.get('multiple_outputs') == [['1']]):
print(
"ERROR: multiple calculation outputs found in file " +
filename +
". Please instead call QCOutput.mulitple_outputs_from_file(QCOutput,'"
+ filename + "')")
print("Exiting...")
exit()
# Parse the molecular details: charge, multiplicity,
# species, and initial geometry.
self._read_charge_and_multiplicity()
if self.data.get('charge') is not None:
self._read_species_and_inital_geometry()
# Check if calculation finished
self.data["completion"] = read_pattern(
self.text, {
"key":
r"Thank you very much for using Q-Chem.\s+Have a nice day."
},
terminate_on_match=True).get('key')
# If the calculation finished, parse the job time.
if self.data.get('completion', []):
temp_timings = read_pattern(
self.text, {
"key":
r"Total job time\:\s*([\d\-\.]+)s\(wall\)\,\s*([\d\-\.]+)s\(cpu\)"
}).get('key')
if temp_timings is not None:
self.data["walltime"] = float(temp_timings[0][0])
self.data["cputime"] = float(temp_timings[0][1])
else:
self.data["walltime"] = None
self.data["cputime"] = None
# Check if calculation is unrestricted
self.data["unrestricted"] = read_pattern(
self.text, {
"key":
r"A(?:n)*\sunrestricted[\s\w\-]+SCF\scalculation\swill\sbe"
},
terminate_on_match=True).get('key')
# Check if calculation uses GEN_SCFMAN, multiple potential output formats
self.data["using_GEN_SCFMAN"] = read_pattern(
self.text, {
"key": r"\s+GEN_SCFMAN: A general SCF calculation manager"
},
terminate_on_match=True).get('key')
if not self.data["using_GEN_SCFMAN"]:
self.data["using_GEN_SCFMAN"] = read_pattern(
self.text, {
"key": r"\s+General SCF calculation program by"
},
terminate_on_match=True).get('key')
# Check if the SCF failed to converge
if read_pattern(
self.text, {
"key": r"SCF failed to converge"
},
terminate_on_match=True).get('key') == [[]]:
self.data["errors"] += ["SCF_failed_to_converge"]
# Parse the SCF
self._read_SCF()
# Parse the Mulliken charges
self._read_mulliken()
# Parse PCM information
self._read_pcm_information()
# Parse the final energy
temp_final_energy = read_pattern(
self.text, {
"key": r"Final\senergy\sis\s+([\d\-\.]+)"
}).get('key')
if temp_final_energy == None:
self.data["final_energy"] = None
else:
self.data["final_energy"] = float(temp_final_energy[0][0])
# Parse the S2 values in the case of an unrestricted calculation
if self.data.get('unrestricted', []):
temp_S2 = read_pattern(self.text, {
"key": r"<S\^2>\s=\s+([\d\-\.]+)"
}).get('key')
if temp_S2 == None:
self.data["S2"] = None
elif len(temp_S2) == 1:
self.data["S2"] = float(temp_S2[0][0])
else:
real_S2 = np.zeros(len(temp_S2))
for ii, entry in enumerate(temp_S2):
real_S2[ii] = float(entry[0])
self.data["S2"] = real_S2
# Check if the calculation is a geometry optimization. If so, parse the relevant output
self.data["optimization"] = read_pattern(
self.text, {
"key": r"(?i)\s*job(?:_)*type\s*(?:=)*\s*opt"
}).get('key')
if self.data.get('optimization', []):
temp_energy_trajectory = read_pattern(
self.text, {
"key": r"\sEnergy\sis\s+([\d\-\.]+)"
}).get('key')
if temp_energy_trajectory == None:
self.data["energy_trajectory"] = []
else:
real_energy_trajectory = np.zeros(len(temp_energy_trajectory))
for ii, entry in enumerate(temp_energy_trajectory):
real_energy_trajectory[ii] = float(entry[0])
self.data["energy_trajectory"] = real_energy_trajectory
self._read_last_geometry()
if have_babel:
self._check_for_structure_changes()
self._read_optimized_geometry()
# Then, if no optimized geometry or z-matrix is found, and no errors have been previously
            # identified, check to see if the optimization failed to converge or if Lambda wasn't able
# to be determined.
if len(self.data.get("errors")) == 0 and self.data.get('optimized_geometry') is None \
and len(self.data.get('optimized_zmat')) == 0:
self._check_optimization_errors()
# Check if the calculation contains a constraint in an $opt section.
self.data["opt_constraint"] = read_pattern(self.text, {
"key": r"\$opt\s+CONSTRAINT"
}).get('key')
if self.data.get('opt_constraint'):
temp_constraint = read_pattern(
self.text, {
"key":
r"Constraints and their Current Values\s+Value\s+Constraint\s+(\w+)\:\s+([\d\-\.]+)\s+([\d\-\.]+)\s+([\d\-\.]+)\s+([\d\-\.]+)\s+([\d\-\.]+)\s+([\d\-\.]+)"
}).get('key')
if temp_constraint != None:
self.data["opt_constraint"] = temp_constraint[0]
if float(self.data.get('opt_constraint')[5]) != float(
self.data.get('opt_constraint')[6]):
if abs(float(self.data.get('opt_constraint')[5])) != abs(
float(self.data.get('opt_constraint')[6])):
raise ValueError(
"ERROR: Opt section value and constraint should be the same!"
)
elif abs(float(
self.data.get('opt_constraint')[5])) not in [
0.0, 180.0
]:
raise ValueError(
"ERROR: Opt section value and constraint can only differ by a sign at 0.0 and 180.0!"
)
# Check if the calculation is a frequency analysis. If so, parse the relevant output
self.data["frequency_job"] = read_pattern(
self.text, {
"key": r"(?i)\s*job(?:_)*type\s*(?:=)*\s*freq"
},
terminate_on_match=True).get('key')
if self.data.get('frequency_job', []):
self._read_frequency_data()
self.data["single_point_job"] = read_pattern(
self.text, {
"key": r"(?i)\s*job(?:_)*type\s*(?:=)*\s*sp"
},
terminate_on_match=True).get("key")
if self.data.get("single_point_job", []):
self._read_single_point_data()
# If the calculation did not finish and no errors have been identified yet, check for other errors
if not self.data.get('completion',
[]) and self.data.get("errors") == []:
self._check_completion_errors()
@staticmethod
def multiple_outputs_from_file(cls, filename, keep_sub_files=True):
"""
Parses a QChem output file with multiple calculations
        1.) Separates the output into sub-files
            e.g. qcout -> qcout.0, qcout.1, qcout.2 ... qcout.N
            a.) Find delimiter for multiple calculations
            b.) Make separate output sub-files
        2.) Creates separate QCCalcs for each one from the sub-files
"""
to_return = []
with zopen(filename, 'rt') as f:
text = re.split(r'\s*(?:Running\s+)*Job\s+\d+\s+of\s+\d+\s+',
f.read())
if text[0] == '':
text = text[1:]
for i, sub_text in enumerate(text):
temp = open(filename + '.' + str(i), 'w')
temp.write(sub_text)
temp.close()
tempOutput = cls(filename + '.' + str(i))
to_return.append(tempOutput)
if not keep_sub_files:
os.remove(filename + '.' + str(i))
return to_return
def _read_charge_and_multiplicity(self):
"""
Parses charge and multiplicity.
"""
temp_charge = read_pattern(
self.text, {
"key": r"\$molecule\s+([\-\d]+)\s+\d"
},
terminate_on_match=True).get('key')
if temp_charge != None:
self.data["charge"] = int(temp_charge[0][0])
else:
temp_charge = read_pattern(
self.text, {
"key": r"Sum of atomic charges \=\s+([\d\-\.\+]+)"
},
terminate_on_match=True).get('key')
if temp_charge == None:
self.data["charge"] = None
else:
self.data["charge"] = int(float(temp_charge[0][0]))
temp_multiplicity = read_pattern(
self.text, {
"key": r"\$molecule\s+[\-\d]+\s+(\d)"
},
terminate_on_match=True).get('key')
if temp_multiplicity != None:
self.data["multiplicity"] = int(temp_multiplicity[0][0])
else:
temp_multiplicity = read_pattern(
self.text, {
"key": r"Sum of spin\s+charges \=\s+([\d\-\.\+]+)"
},
terminate_on_match=True).get('key')
if temp_multiplicity == None:
self.data["multiplicity"] = 1
else:
self.data["multiplicity"] = int(
float(temp_multiplicity[0][0])) + 1
def _read_species_and_inital_geometry(self):
"""
Parses species and initial geometry.
"""
header_pattern = r"Standard Nuclear Orientation \(Angstroms\)\s+I\s+Atom\s+X\s+Y\s+Z\s+-+"
table_pattern = r"\s*\d+\s+([a-zA-Z]+)\s*([\d\-\.]+)\s*([\d\-\.]+)\s*([\d\-\.]+)\s*"
footer_pattern = r"\s*-+"
temp_geom = read_table_pattern(self.text, header_pattern,
table_pattern, footer_pattern)
if temp_geom == None or len(temp_geom) == 0:
self.data["species"] = None
self.data["initial_geometry"] = None
self.data["initial_molecule"] = None
self.data["point_group"] = None
else:
temp_point_group = read_pattern(
self.text, {
"key":
r"Molecular Point Group\s+([A-Za-z\d\*]+)"
},
terminate_on_match=True).get('key')
if temp_point_group != None:
self.data["point_group"] = temp_point_group[0][0]
else:
self.data["point_group"] = None
temp_geom = temp_geom[0]
species = []
geometry = np.zeros(shape=(len(temp_geom), 3), dtype=float)
for ii, entry in enumerate(temp_geom):
species += [entry[0]]
for jj in range(3):
geometry[ii, jj] = float(entry[jj + 1])
self.data["species"] = species
self.data["initial_geometry"] = geometry
self.data["initial_molecule"] = Molecule(
species=species,
coords=geometry,
charge=self.data.get('charge'),
spin_multiplicity=self.data.get('multiplicity'))
def _read_SCF(self):
"""
Parses both old and new SCFs.
"""
if self.data.get('using_GEN_SCFMAN', []):
if "SCF_failed_to_converge" in self.data.get("errors"):
footer_pattern = r"^\s*gen_scfman_exception: SCF failed to converge"
else:
footer_pattern = r"^\s*\-+\n\s+SCF time"
header_pattern = r"^\s*\-+\s+Cycle\s+Energy\s+(?:(?:DIIS)*\s+[Ee]rror)*(?:RMS Gradient)*\s+\-+(?:\s*\-+\s+OpenMP\s+Integral\s+computing\s+Module\s+(?:Release:\s+version\s+[\d\-\.]+\,\s+\w+\s+[\d\-\.]+\, Q-Chem Inc\. Pittsburgh\s+)*\-+)*\n"
table_pattern = r"(?:\s*Nonlocal correlation = [\d\-\.]+e[\d\-]+)*(?:\s*Inaccurate integrated density:\n\s+Number of electrons\s+=\s+[\d\-\.]+\n\s+Numerical integral\s+=\s+[\d\-\.]+\n\s+Relative error\s+=\s+[\d\-\.]+\s+\%\n)*\s*\d+\s+([\d\-\.]+)\s+([\d\-\.]+)e([\d\-\.\+]+)(?:\s+Convergence criterion met)*(?:\s+Preconditoned Steepest Descent)*(?:\s+Roothaan Step)*(?:\s+(?:Normal\s+)*BFGS [Ss]tep)*(?:\s+LineSearch Step)*(?:\s+Line search: overstep)*(?:\s+Dog-leg BFGS step)*(?:\s+Line search: understep)*(?:\s+Descent step)*"
else:
if "SCF_failed_to_converge" in self.data.get("errors"):
footer_pattern = r"^\s*\d+\s*[\d\-\.]+\s+[\d\-\.]+E[\d\-\.]+\s+Convergence\s+failure\n"
else:
footer_pattern = r"^\s*\-+\n"
header_pattern = r"^\s*\-+\s+Cycle\s+Energy\s+DIIS Error\s+\-+\n"
table_pattern = r"(?:\s*Inaccurate integrated density:\n\s+Number of electrons\s+=\s+[\d\-\.]+\n\s+Numerical integral\s+=\s+[\d\-\.]+\n\s+Relative error\s+=\s+[\d\-\.]+\s+\%\n)*\s*\d+\s*([\d\-\.]+)\s+([\d\-\.]+)E([\d\-\.\+]+)(?:\s*\n\s*cpu\s+[\d\-\.]+\swall\s+[\d\-\.]+)*(?:\nin dftxc\.C, eleTot sum is:[\d\-\.]+, tauTot is\:[\d\-\.]+)*(?:\s+Convergence criterion met)*(?:\s+Done RCA\. Switching to DIIS)*(?:\n\s*Warning: not using a symmetric Q)*(?:\nRecomputing EXC\s*[\d\-\.]+\s*[\d\-\.]+\s*[\d\-\.]+(?:\s*\nRecomputing EXC\s*[\d\-\.]+\s*[\d\-\.]+\s*[\d\-\.]+)*)*"
temp_scf = read_table_pattern(self.text, header_pattern, table_pattern,
footer_pattern)
real_scf = []
for one_scf in temp_scf:
temp = np.zeros(shape=(len(one_scf), 2))
for ii, entry in enumerate(one_scf):
temp[ii, 0] = float(entry[0])
temp[ii, 1] = float(entry[1]) * 10**float(entry[2])
real_scf += [temp]
self.data["SCF"] = real_scf
def _read_mulliken(self):
"""
Parses Mulliken charges. Also parses spins given an unrestricted SCF.
"""
if self.data.get('unrestricted', []):
header_pattern = r"\-+\s+Ground-State Mulliken Net Atomic Charges\s+Atom\s+Charge \(a\.u\.\)\s+Spin\s\(a\.u\.\)\s+\-+"
table_pattern = r"\s+\d+\s\w+\s+([\d\-\.]+)\s+([\d\-\.]+)"
footer_pattern = r"\s\s\-+\s+Sum of atomic charges"
else:
header_pattern = r"\-+\s+Ground-State Mulliken Net Atomic Charges\s+Atom\s+Charge \(a\.u\.\)\s+\-+"
table_pattern = r"\s+\d+\s\w+\s+([\d\-\.]+)"
footer_pattern = r"\s\s\-+\s+Sum of atomic charges"
temp_mulliken = read_table_pattern(self.text, header_pattern,
table_pattern, footer_pattern)
real_mulliken = []
for one_mulliken in temp_mulliken:
if self.data.get('unrestricted', []):
temp = np.zeros(shape=(len(one_mulliken), 2))
for ii, entry in enumerate(one_mulliken):
temp[ii, 0] = float(entry[0])
temp[ii, 1] = float(entry[1])
else:
temp = np.zeros(len(one_mulliken))
for ii, entry in enumerate(one_mulliken):
temp[ii] = float(entry[0])
real_mulliken += [temp]
self.data["Mulliken"] = real_mulliken
def _read_optimized_geometry(self):
"""
Parses optimized XYZ coordinates. If not present, parses optimized Z-matrix.
"""
header_pattern = r"\*+\s+OPTIMIZATION\s+CONVERGED\s+\*+\s+\*+\s+Coordinates \(Angstroms\)\s+ATOM\s+X\s+Y\s+Z"
table_pattern = r"\s+\d+\s+\w+\s+([\d\-\.]+)\s+([\d\-\.]+)\s+([\d\-\.]+)"
footer_pattern = r"\s+Z-matrix Print:"
parsed_optimized_geometry = read_table_pattern(
self.text, header_pattern, table_pattern, footer_pattern)
        if parsed_optimized_geometry == [] or parsed_optimized_geometry is None:
self.data["optimized_geometry"] = None
header_pattern = r"^\s+\*+\s+OPTIMIZATION CONVERGED\s+\*+\s+\*+\s+Z-matrix\s+Print:\s+\$molecule\s+[\d\-]+\s+[\d\-]+\n"
table_pattern = r"\s*(\w+)(?:\s+(\d+)\s+([\d\-\.]+)(?:\s+(\d+)\s+([\d\-\.]+)(?:\s+(\d+)\s+([\d\-\.]+))*)*)*(?:\s+0)*"
footer_pattern = r"^\$end\n"
self.data["optimized_zmat"] = read_table_pattern(
self.text, header_pattern, table_pattern, footer_pattern)
else:
self.data["optimized_geometry"] = process_parsed_coords(
parsed_optimized_geometry[0])
if self.data.get('charge') != None:
self.data["molecule_from_optimized_geometry"] = Molecule(
species=self.data.get('species'),
coords=self.data.get('optimized_geometry'),
charge=self.data.get('charge'),
spin_multiplicity=self.data.get('multiplicity'))
def _read_last_geometry(self):
"""
Parses the last geometry from an optimization trajectory for use in a new input file.
"""
header_pattern = r"\s+Optimization\sCycle:\s+" + \
str(len(self.data.get("energy_trajectory"))) + \
r"\s+Coordinates \(Angstroms\)\s+ATOM\s+X\s+Y\s+Z"
table_pattern = r"\s+\d+\s+\w+\s+([\d\-\.]+)\s+([\d\-\.]+)\s+([\d\-\.]+)"
footer_pattern = r"\s+Point Group\:\s+[\d\w\*]+\s+Number of degrees of freedom\:\s+\d+"
parsed_last_geometry = read_table_pattern(
self.text, header_pattern, table_pattern, footer_pattern)
        if parsed_last_geometry == [] or parsed_last_geometry is None:
self.data["last_geometry"] = None
else:
self.data["last_geometry"] = process_parsed_coords(
parsed_last_geometry[0])
if self.data.get('charge') != None:
self.data["molecule_from_last_geometry"] = Molecule(
species=self.data.get('species'),
coords=self.data.get('last_geometry'),
charge=self.data.get('charge'),
spin_multiplicity=self.data.get('multiplicity'))
def _check_for_structure_changes(self):
initial_mol_graph = MoleculeGraph.with_local_env_strategy(self.data["initial_molecule"],
OpenBabelNN(),
reorder=False,
extend_structure=False)
initial_graph = initial_mol_graph.graph
last_mol_graph = MoleculeGraph.with_local_env_strategy(self.data["molecule_from_last_geometry"],
OpenBabelNN(),
reorder=False,
extend_structure=False)
last_graph = last_mol_graph.graph
if initial_mol_graph.isomorphic_to(last_mol_graph):
self.data["structure_change"] = "no_change"
else:
if nx.is_connected(initial_graph.to_undirected()) and not nx.is_connected(last_graph.to_undirected()):
self.data["structure_change"] = "unconnected_fragments"
elif last_graph.number_of_edges() < initial_graph.number_of_edges():
self.data["structure_change"] = "fewer_bonds"
elif last_graph.number_of_edges() > initial_graph.number_of_edges():
self.data["structure_change"] = "more_bonds"
else:
self.data["structure_change"] = "bond_change"
def _read_frequency_data(self):
"""
Parses frequencies, enthalpy, entropy, and mode vectors.
"""
temp_dict = read_pattern(
self.text, {
"frequencies":
r"\s*Frequency:\s+([\d\-\.]+)(?:\s+([\d\-\.]+)(?:\s+([\d\-\.]+))*)*",
"IR_intens":
r"\s*IR Intens:\s+([\d\-\.]+)(?:\s+([\d\-\.]+)(?:\s+([\d\-\.]+))*)*",
"IR_active":
r"\s*IR Active:\s+([YESNO]+)(?:\s+([YESNO]+)(?:\s+([YESNO]+))*)*",
"ZPE":
r"\s*Zero point vibrational energy:\s+([\d\-\.]+)\s+kcal/mol",
"trans_enthalpy":
r"\s*Translational Enthalpy:\s+([\d\-\.]+)\s+kcal/mol",
"rot_enthalpy":
r"\s*Rotational Enthalpy:\s+([\d\-\.]+)\s+kcal/mol",
"vib_enthalpy":
r"\s*Vibrational Enthalpy:\s+([\d\-\.]+)\s+kcal/mol",
"gas_constant":
r"\s*gas constant \(RT\):\s+([\d\-\.]+)\s+kcal/mol",
"trans_entropy":
r"\s*Translational Entropy:\s+([\d\-\.]+)\s+cal/mol\.K",
"rot_entropy":
r"\s*Rotational Entropy:\s+([\d\-\.]+)\s+cal/mol\.K",
"vib_entropy":
r"\s*Vibrational Entropy:\s+([\d\-\.]+)\s+cal/mol\.K",
"total_enthalpy":
r"\s*Total Enthalpy:\s+([\d\-\.]+)\s+kcal/mol",
"total_entropy":
r"\s*Total Entropy:\s+([\d\-\.]+)\s+cal/mol\.K"
})
keys = ["ZPE", "trans_enthalpy", "rot_enthalpy", "vib_enthalpy", "gas_constant", "trans_entropy", "rot_entropy", "vib_entropy", "total_enthalpy", "total_entropy"]
for key in keys:
if temp_dict.get(key) == None:
self.data[key] = None
else:
self.data[key] = float(temp_dict.get(key)[0][0])
if temp_dict.get('frequencies') == None:
self.data['frequencies'] = None
self.data['IR_intens'] = None
self.data['IR_active'] = None
else:
temp_freqs = [
value for entry in temp_dict.get('frequencies')
for value in entry
]
temp_intens = [
value for entry in temp_dict.get('IR_intens')
for value in entry
]
active = [
value for entry in temp_dict.get('IR_active')
for value in entry
]
self.data['IR_active'] = active
freqs = np.zeros(len(temp_freqs) - temp_freqs.count('None'))
for ii, entry in enumerate(temp_freqs):
if entry != 'None':
freqs[ii] = float(entry)
self.data['frequencies'] = freqs
intens = np.zeros(len(temp_intens) - temp_intens.count('None'))
for ii, entry in enumerate(temp_intens):
if entry != 'None':
intens[ii] = float(entry)
self.data['IR_intens'] = intens
header_pattern = r"\s*Raman Active:\s+[YESNO]+\s+(?:[YESNO]+\s+)*X\s+Y\s+Z\s+(?:X\s+Y\s+Z\s+)*"
table_pattern = r"\s*[a-zA-Z][a-zA-Z\s]\s*([\d\-\.]+)\s*([\d\-\.]+)\s*([\d\-\.]+)\s*(?:([\d\-\.]+)\s*([\d\-\.]+)\s*([\d\-\.]+)\s*(?:([\d\-\.]+)\s*([\d\-\.]+)\s*([\d\-\.]+))*)*"
footer_pattern = r"TransDip\s+[\d\-\.]+\s*[\d\-\.]+\s*[\d\-\.]+\s*(?:[\d\-\.]+\s*[\d\-\.]+\s*[\d\-\.]+\s*)*"
temp_freq_mode_vecs = read_table_pattern(
self.text, header_pattern, table_pattern, footer_pattern)
freq_mode_vecs = np.zeros(
shape=(len(freqs), len(temp_freq_mode_vecs[0]), 3))
for ii, triple_FMV in enumerate(temp_freq_mode_vecs):
for jj, line in enumerate(triple_FMV):
for kk, entry in enumerate(line):
if entry != 'None':
freq_mode_vecs[int(ii * 3 + math.floor(kk / 3)),
jj, kk % 3] = float(entry)
self.data["frequency_mode_vectors"] = freq_mode_vecs
def _read_single_point_data(self):
"""
Parses final free energy information from single-point calculations.
"""
temp_dict = read_pattern(
self.text, {
"final_energy":
r"\s*SCF\s+energy in the final basis set\s+=\s*([\d\-\.]+)"
})
if temp_dict.get('final_energy') == None:
self.data['final_energy'] = None
else:
# -1 in case of pcm
# Two lines will match the above; we want final calculation
self.data['final_energy'] = float(temp_dict.get('final_energy')[-1][0])
def _read_pcm_information(self):
"""
Parses information from PCM solvent calculations.
"""
temp_dict = read_pattern(
self.text, {
"g_electrostatic": r"\s*G_electrostatic\s+=\s+([\d\-\.]+)\s+hartree\s+=\s+([\d\-\.]+)\s+kcal/mol\s*",
"g_cavitation": r"\s*G_cavitation\s+=\s+([\d\-\.]+)\s+hartree\s+=\s+([\d\-\.]+)\s+kcal/mol\s*",
"g_dispersion": r"\s*G_dispersion\s+=\s+([\d\-\.]+)\s+hartree\s+=\s+([\d\-\.]+)\s+kcal/mol\s*",
"g_repulsion": r"\s*G_repulsion\s+=\s+([\d\-\.]+)\s+hartree\s+=\s+([\d\-\.]+)\s+kcal/mol\s*",
"total_contribution_pcm": r"\s*Total\s+=\s+([\d\-\.]+)\s+hartree\s+=\s+([\d\-\.]+)\s+kcal/mol\s*",
}
)
if temp_dict.get("g_electrostatic") is None:
self.data["g_electrostatic"] = None
else:
self.data["g_electrostatic"] = float(temp_dict.get("g_electrostatic")[0][0])
if temp_dict.get("g_cavitation") is None:
self.data["g_cavitation"] = None
else:
self.data["g_cavitation"] = float(temp_dict.get("g_cavitation")[0][0])
if temp_dict.get("g_dispersion") is None:
self.data["g_dispersion"] = None
else:
self.data["g_dispersion"] = float(temp_dict.get("g_dispersion")[0][0])
if temp_dict.get("g_repulsion") is None:
self.data["g_repulsion"] = None
else:
self.data["g_repulsion"] = float(temp_dict.get("g_repulsion")[0][0])
if temp_dict.get("total_contribution_pcm") is None:
self.data["total_contribution_pcm"] = []
else:
self.data["total_contribution_pcm"] = float(temp_dict.get("total_contribution_pcm")[0][0])
def _check_optimization_errors(self):
"""
Parses three potential optimization errors: failing to converge within the allowed number
        of optimization cycles, failure to determine the lambda needed to continue, and inconsistent
size of MO files due to a linear dependence in the AO basis.
"""
if read_pattern(
self.text, {
"key": r"MAXIMUM OPTIMIZATION CYCLES REACHED"
},
terminate_on_match=True).get('key') == [[]]:
self.data["errors"] += ["out_of_opt_cycles"]
elif read_pattern(
self.text, {
"key": r"UNABLE TO DETERMINE Lamda IN FormD"
},
terminate_on_match=True).get('key') == [[]]:
self.data["errors"] += ["unable_to_determine_lamda"]
elif read_pattern(
self.text, {
"key": r"Inconsistent size for SCF MO coefficient file"
},
terminate_on_match=True).get('key') == [[]]:
self.data["errors"] += ["linear_dependent_basis"]
def _check_completion_errors(self):
"""
        Parses the potential errors that can cause a job to crash: inability to transform
        coordinates due to a bad symmetry specification, an input file that fails to pass
        inspection, errors reading or writing files, a missing $molecule section, and
        Q-Chem never having been called at all.
"""
if read_pattern(
self.text, {
"key":
r"Coordinates do not transform within specified threshold"
},
terminate_on_match=True).get('key') == [[]]:
self.data["errors"] += ["failed_to_transform_coords"]
elif read_pattern(
self.text,
{
"key": r"The Q\-Chem input file has failed to pass inspection"
},
terminate_on_match=True).get('key') == [[]]:
self.data["errors"] += ["input_file_error"]
elif read_pattern(
self.text, {
"key": r"Error opening input stream"
},
terminate_on_match=True).get('key') == [[]]:
self.data["errors"] += ["failed_to_read_input"]
elif read_pattern(
self.text, {
"key": r"FileMan error: End of file reached prematurely"
},
terminate_on_match=True).get('key') == [[]]:
self.data["errors"] += ["IO_error"]
elif read_pattern(
self.text, {
"key": r"Could not find \$molecule section in ParseQInput"
},
terminate_on_match=True).get('key') == [[]]:
self.data["errors"] += ["read_molecule_error"]
elif read_pattern(
self.text, {
"key": r"Welcome to Q-Chem"
},
terminate_on_match=True).get('key') != [[]]:
self.data["errors"] += ["never_called_qchem"]
else:
self.data["errors"] += ["unknown_error"]
def as_dict(self):
d = {}
d["data"] = self.data
d["text"] = self.text
d["filename"] = self.filename
return jsanitize(d, strict=True)
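# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): typical use of the
# parser defined above.  The file names are hypothetical.
def _example_qcoutput_usage():  # illustrative only
    # A file holding a single Q-Chem calculation:
    out = QCOutput("water_opt.qcout")
    energy = out.data.get("final_energy")
    errors = out.data.get("errors")
    # A file holding several back-to-back jobs; note the class is passed
    # explicitly, matching the error message printed in __init__ above.
    outs = QCOutput.multiple_outputs_from_file(QCOutput, "batch.qcout",
                                               keep_sub_files=False)
    return energy, errors, [o.data.get("final_energy") for o in outs]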
| dongsenfo/pymatgen | pymatgen/io/qchem/outputs.py | Python | mit | 31,716 | ["Q-Chem", "pymatgen"] | 9a17dad3cbef1076ebac99b6939ad605158a9fbcfc0e972cf7fd79c9de2bb06c |
# Copyright 2010 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility to use the Closure Compiler CLI from Python."""
import distutils.version
import logging
import re
import subprocess
# Pulls a version number from the first line of 'java -version'
# See http://java.sun.com/j2se/versioning_naming.html to learn more about the
# command's output format.
_VERSION_REGEX = re.compile('"([0-9][.0-9]*)')
def _GetJavaVersion():
"""Returns the string for the current version of Java installed."""
proc = subprocess.Popen(['java', '-version'], stderr=subprocess.PIPE)
unused_stdoutdata, stderrdata = proc.communicate()
version_line = stderrdata.splitlines()[0]
return _VERSION_REGEX.search(version_line).group(1)
def Compile(compiler_jar_path, source_paths, flags=None):
"""Prepares command-line call to Closure Compiler.
Args:
compiler_jar_path: Path to the Closure compiler .jar file.
source_paths: Source paths to build, in order.
flags: A list of additional flags to pass on to Closure Compiler.
Returns:
The compiled source, as a string, or None if compilation failed.
"""
# User friendly version check.
if not (distutils.version.LooseVersion(_GetJavaVersion()) >=
distutils.version.LooseVersion('1.6')):
logging.error('Closure Compiler requires Java 1.6 or higher. '
'Please visit http://www.java.com/getjava')
return
args = ['java', '-jar', compiler_jar_path]
for path in source_paths:
args += ['--js', path]
if flags:
args += flags
logging.info('Compiling with the following command: %s', ' '.join(args))
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
stdoutdata, unused_stderrdata = proc.communicate()
if proc.returncode != 0:
return
return stdoutdata
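# Illustrative sketch (not part of the original module): a typical call to
# Compile().  The jar path, source files and flags below are hypothetical.
def _ExampleCompile():  # illustrative only
  compiled = Compile(
      compiler_jar_path='compiler.jar',
      source_paths=['base.js', 'app.js'],
      flags=['--compilation_level', 'SIMPLE_OPTIMIZATIONS'])
  if compiled is None:
    logging.error('Compilation failed; see messages above.')
  return compiled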
| bjohare/cloughjordan.ie | wp-content/themes/outreach-pro/api/OpenLayers-2.13.1/tools/closure_library_jscompiler.py | Python | cc0-1.0 | 2,337 | ["VisIt"] | 765efb4a07c1969e2f31ecd5148ee922d522a09d108faa1082d6f856a082463b |
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import re
import socket
import sys
import time
import xml.etree.ElementTree
from ..compat import (
compat_cookiejar,
compat_cookies,
compat_getpass,
compat_HTTPError,
compat_http_client,
compat_urllib_error,
compat_urllib_parse,
compat_urllib_parse_urlparse,
compat_urllib_request,
compat_urlparse,
compat_str,
)
from ..utils import (
NO_DEFAULT,
age_restricted,
bug_reports_message,
clean_html,
compiled_regex_type,
determine_ext,
ExtractorError,
fix_xml_ampersands,
float_or_none,
int_or_none,
RegexNotFoundError,
sanitize_filename,
unescapeHTML,
url_basename,
xpath_text,
xpath_with_ns,
)
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url Mandatory. The URL of the video file
* ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
                                 Calculated from the format_id, width, height,
and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", or "m3u8_native".
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language_preference Is this in the correct requested
language?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
* "resolution" (optional, string "{width}x{height"},
deprecated)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
creator: The main artist who created the video.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{language: subformats}. "subformats" is a list sorted from
lower to higher preference, each element is a dictionary
with the "ext" entry and one of:
* "data": The subtitles file contents
* "url": A URL pointing to the subtitles file
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
    average_rating: Average rating given by users; the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all but one of text or html optional):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The URL to the video webpage, if given to youtube-dl it
should allow to get the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
start_time: Time in seconds where the reproduction should start, as
specified in the URL.
end_time: Time in seconds where the reproduction should end, as
specified in the URL.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "title", "description" and "id" attributes
with the same semantics as videos (see above).
_type "multi_video" indicates that there are multiple videos that
    form a single show, for example, multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
_ready = False
_downloader = None
_WORKING = True
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
self._ready = False
self.set_downloader(downloader)
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for *this* class, whereas getattr would also
# match the superclass
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return m.group('id')
@classmethod
def working(cls):
"""Getter method for _WORKING."""
return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
if not self._ready:
self._real_initialize()
self._ready = True
def extract(self, url):
"""Extracts URL information and returns it in list of dicts."""
try:
self.initialize()
return self._real_extract(url)
except ExtractorError:
raise
except compat_http_client.IncompleteRead as e:
            raise ExtractorError('A network error has occurred.', cause=e, expected=True)
except (KeyError, StopIteration) as e:
            raise ExtractorError('An extractor error has occurred.', cause=e)
def set_downloader(self, downloader):
"""Sets the downloader for this IE."""
self._downloader = downloader
def _real_initialize(self):
"""Real initialization process. Redefine in subclasses."""
pass
def _real_extract(self, url):
"""Real extraction process. Redefine in subclasses."""
pass
@classmethod
def ie_key(cls):
"""A string for getting the InfoExtractor with get_info_extractor"""
return cls.__name__[:-2]
@property
def IE_NAME(self):
return type(self).__name__[:-2]
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
""" Returns the response handle """
if note is None:
self.report_download_webpage(video_id)
elif note is not False:
if video_id is None:
self.to_screen('%s' % (note,))
else:
self.to_screen('%s: %s' % (video_id, note))
try:
return self._downloader.urlopen(url_or_request)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
if errnote is False:
return False
if errnote is None:
errnote = 'Unable to download webpage'
errmsg = '%s: %s' % (errnote, compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
self._downloader.report_warning(errmsg)
return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None):
""" Returns a tuple (page content as string, URL handle) """
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal)
if urlh is False:
assert not fatal
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
return (content, urlh)
@staticmethod
def _guess_encoding_from_content(content_type, webpage_bytes):
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
return encoding
def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
content_type = urlh.headers.get('Content-Type', '')
webpage_bytes = urlh.read()
if prefix is not None:
webpage_bytes = prefix + webpage_bytes
if not encoding:
encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
if self._downloader.params.get('dump_intermediate_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
self.to_screen('Dumping request to ' + url)
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
if self._downloader.params.get('write_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
basen = '%s_%s' % (video_id, url)
if len(basen) > 240:
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
basen = basen[:240 - len(h)] + h
raw_filename = basen + '.dump'
filename = sanitize_filename(raw_filename, restricted=True)
self.to_screen('Saving request to ' + filename)
# Working around MAX_PATH limitation on Windows (see
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
if os.name == 'nt':
absfilepath = os.path.abspath(filename)
if len(absfilepath) > 259:
filename = '\\\\?\\' + absfilepath
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)
try:
content = webpage_bytes.decode(encoding, 'replace')
except LookupError:
content = webpage_bytes.decode('utf-8', 'replace')
if ('<title>Access to this site is blocked</title>' in content and
'Websense' in content[:512]):
msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
blocked_iframe = self._html_search_regex(
r'<iframe src="([^"]+)"', content,
'Websense information URL', default=None)
if blocked_iframe:
msg += ' Visit %s for more details' % blocked_iframe
raise ExtractorError(msg, expected=True)
if '<title>The URL you requested has been blocked</title>' in content[:512]:
msg = (
'Access to this webpage has been blocked by Indian censorship. '
'Use a VPN or proxy server (with --proxy) to route around it.')
block_msg = self._html_search_regex(
r'</h1><p>(.*?)</p>',
content, 'block message', default=None)
if block_msg:
msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
raise ExtractorError(msg, expected=True)
return content
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None):
""" Returns the data of the page as a string """
success = False
try_count = 0
while success is False:
try:
res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding)
success = True
except compat_http_client.IncompleteRead as e:
try_count += 1
if try_count >= tries:
raise e
self._sleep(timeout, video_id)
if res is False:
return res
else:
content, _ = res
return content
def _download_xml(self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True, encoding=None):
"""Return the xml as an xml.etree.ElementTree.Element"""
xml_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding)
if xml_string is False:
return xml_string
if transform_source:
xml_string = transform_source(xml_string)
return xml.etree.ElementTree.fromstring(xml_string.encode('utf-8'))
def _download_json(self, url_or_request, video_id,
note='Downloading JSON metadata',
errnote='Unable to download JSON metadata',
transform_source=None,
fatal=True, encoding=None):
json_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding)
if (not fatal) and json_string is False:
return None
return self._parse_json(
json_string, video_id, transform_source=transform_source, fatal=fatal)
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
'[%s] %s%s' % (self.IE_NAME, idstr, msg))
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
def report_extraction(self, id_or_name):
"""Report information extraction."""
self.to_screen('%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self.to_screen('%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self.to_screen('Confirming age')
def report_login(self):
"""Report attempt to log in."""
self.to_screen('Logging in')
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
"""Returns a URL that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
if video_title is not None:
video_info['title'] = video_title
return video_info
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
        Perform a regex search on the given string, using a single pattern or a
        list of patterns, and return the first matching group.
        In case of failure return a default value, emit a warning or raise a
        RegexNotFoundError (depending on the default and fatal arguments),
        specifying the field name.
"""
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj:
break
if not self._downloader.params.get('no_color') and os.name != 'nt' and sys.stderr.isatty():
_name = '\033[0;34m%s\033[0m' % name
else:
_name = name
if mobj:
if group is None:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
else:
return mobj.group(group)
elif default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
else:
self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
return None
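    # Illustrative example (pattern and input are made up, not from the original source):
    #   self._search_regex(r'id=(\d+)', 'video id=42 here', 'video id')
    # returns '42'; the same call with default='0' returns '0' instead of raising
    # RegexNotFoundError when nothing matches.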
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
def _get_login_info(self):
"""
Get the login info as (username, password)
It will look in the netrc file using the _NETRC_MACHINE value
If there's no info available, return (None, None)
"""
if self._downloader is None:
return (None, None)
username = None
password = None
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get('username', None) is not None:
username = downloader_params['username']
password = downloader_params['password']
elif downloader_params.get('usenetrc', False):
try:
info = netrc.netrc().authenticators(self._NETRC_MACHINE)
if info is not None:
username = info[0]
password = info[2]
else:
raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning('parsing .netrc: %s' % compat_str(err))
return (username, password)
def _get_tfa_info(self, note='two-factor verification code'):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verify
currently just uses the command line option
If there's no info available, return None
"""
if self._downloader is None:
return None
downloader_params = self._downloader.params
if downloader_params.get('twofactor', None) is not None:
return downloader_params['twofactor']
return compat_getpass('Type %s and press [Return]: ' % note)
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^>]+?)"|\'([^>]+?)\')'
property_re = r'(?:name|property)=[\'"]og:%s[\'"]' % re.escape(prop)
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
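    # Illustrative sketch (added for clarity, not part of the original source): for
    # prop == 'title' the two generated patterns match tags such as
    #   <meta property="og:title" content="Some title">
    # with the property/content attributes in either order; the content value is the
    # captured group returned by _og_search_property().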
@staticmethod
def _meta_regex(prop):
return r'''(?isx)<meta
(?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
[^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
def _og_search_property(self, prop, html, name=None, **kargs):
if name is None:
name = 'OpenGraph %s' % prop
escaped = self._search_regex(self._og_regexes(prop), html, name, flags=re.DOTALL, **kargs)
if escaped is None:
return None
return unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
def _og_search_title(self, html, **kargs):
return self._og_search_property('title', html, **kargs)
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
regexes = self._og_regexes('video') + self._og_regexes('video:url')
if secure:
regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
def _og_search_url(self, html, **kargs):
return self._og_search_property('url', html, **kargs)
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
if display_name is None:
display_name = name
return self._html_search_regex(
self._meta_regex(name),
html, display_name, fatal=fatal, group='content', **kwargs)
def _dc_search_uploader(self, html):
return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower(), None)
def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
family_friendly = self._html_search_meta('isFamilyFriendly', html)
if not family_friendly:
return None
RATING_TABLE = {
'1': 0,
'true': 0,
'0': 18,
'false': 18,
}
return RATING_TABLE.get(family_friendly.lower(), None)
def _twitter_search_player(self, html):
return self._html_search_meta('twitter:player', html,
'twitter card player')
@staticmethod
def _hidden_inputs(html):
hidden_inputs = {}
for input in re.findall(r'<input([^>]+)>', html):
if not re.search(r'type=(["\'])hidden\1', input):
continue
name = re.search(r'name=(["\'])(?P<value>.+?)\1', input)
if not name:
continue
value = re.search(r'value=(["\'])(?P<value>.*?)\1', input)
if not value:
continue
hidden_inputs[name.group('value')] = value.group('value')
return hidden_inputs
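    # Illustrative example (made-up fragment): given
    #   <input type="hidden" name="csrf_token" value="abc123">
    # _hidden_inputs returns {'csrf_token': 'abc123'}; inputs without a hidden type,
    # a name or a value attribute are skipped.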
def _form_hidden_inputs(self, form_id, html):
form = self._search_regex(
r'(?s)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
html, '%s form' % form_id, group='form')
return self._hidden_inputs(form)
def _sort_formats(self, formats, field_preference=None):
if not formats:
raise ExtractorError('No video formats found')
def _formats_key(f):
# TODO remove the following workaround
from ..utils import determine_ext
if not f.get('ext') and 'url' in f:
f['ext'] = determine_ext(f['url'])
if isinstance(field_preference, (list, tuple)):
return tuple(f.get(field) if f.get(field) is not None else -1 for field in field_preference)
preference = f.get('preference')
if preference is None:
proto = f.get('protocol')
if proto is None:
proto = compat_urllib_parse_urlparse(f.get('url', '')).scheme
preference = 0 if proto in ['http', 'https'] else -0.1
if f.get('ext') in ['f4f', 'f4m']: # Not yet supported
preference -= 0.5
if f.get('vcodec') == 'none': # audio only
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
else:
ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
ext_preference = 0
try:
audio_ext_preference = ORDER.index(f['ext'])
except ValueError:
audio_ext_preference = -1
else:
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['flv', 'mp4', 'webm']
else:
ORDER = ['webm', 'flv', 'mp4']
try:
ext_preference = ORDER.index(f['ext'])
except ValueError:
ext_preference = -1
audio_ext_preference = 0
return (
preference,
f.get('language_preference') if f.get('language_preference') is not None else -1,
f.get('quality') if f.get('quality') is not None else -1,
f.get('tbr') if f.get('tbr') is not None else -1,
f.get('filesize') if f.get('filesize') is not None else -1,
f.get('vbr') if f.get('vbr') is not None else -1,
f.get('height') if f.get('height') is not None else -1,
f.get('width') if f.get('width') is not None else -1,
ext_preference,
f.get('abr') if f.get('abr') is not None else -1,
audio_ext_preference,
f.get('fps') if f.get('fps') is not None else -1,
f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
f.get('source_preference') if f.get('source_preference') is not None else -1,
f.get('format_id') if f.get('format_id') is not None else '',
)
formats.sort(key=_formats_key)
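        # Note (added for clarity): the sort key is ascending, so after sorting the
        # list runs from worst to best -- the highest-preference format ends up last.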
def _check_formats(self, formats, video_id):
if formats:
formats[:] = filter(
lambda f: self._is_valid_url(
f['url'], video_id,
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)
def _is_valid_url(self, url, video_id, item='video'):
url = self._proto_relative_url(url, scheme='http:')
# For now assume non HTTP(S) URLs always valid
if not (url.startswith('http://') or url.startswith('https://')):
return True
try:
self._request_webpage(url, video_id, 'Checking %s URL' % item)
return True
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError):
self.to_screen(
'%s: %s URL is invalid, skipping' % (video_id, item))
return False
raise
def http_scheme(self):
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
if self._downloader.params.get('prefer_insecure', False)
else 'https:')
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip()):
manifest = self._download_xml(
manifest_url, video_id, 'Downloading f4m manifest',
'Unable to download f4m manifest',
# Some manifests may be malformed, e.g. prosiebensat1 generated manifests
# (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
transform_source=transform_source)
formats = []
manifest_version = '1.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
if not media_nodes:
manifest_version = '2.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
for i, media_el in enumerate(media_nodes):
if manifest_version == '2.0':
media_url = media_el.attrib.get('href') or media_el.attrib.get('url')
if not media_url:
continue
manifest_url = (
media_url if media_url.startswith('http://') or media_url.startswith('https://')
else ('/'.join(manifest_url.split('/')[:-1]) + '/' + media_url))
# If media_url is itself a f4m manifest do the recursive extraction
# since bitrates in parent manifest (this one) and media_url manifest
# may differ leading to inability to resolve the format by requested
# bitrate in f4m downloader
if determine_ext(manifest_url) == 'f4m':
formats.extend(self._extract_f4m_formats(manifest_url, video_id, preference, f4m_id))
continue
tbr = int_or_none(media_el.attrib.get('bitrate'))
formats.append({
'format_id': '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)])),
'url': manifest_url,
'ext': 'flv',
'tbr': tbr,
'width': int_or_none(media_el.attrib.get('width')),
'height': int_or_none(media_el.attrib.get('height')),
'preference': preference,
})
self._sort_formats(formats)
return formats
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, note=None, errnote=None,
fatal=True):
formats = [{
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': preference - 1 if preference else -1,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}]
format_url = lambda u: (
u
if re.match(r'^https?://', u)
else compat_urlparse.urljoin(m3u8_url, u))
m3u8_doc = self._download_webpage(
m3u8_url, video_id,
note=note or 'Downloading m3u8 information',
errnote=errnote or 'Failed to download m3u8 information',
fatal=fatal)
if m3u8_doc is False:
return m3u8_doc
last_info = None
last_media = None
kv_rex = re.compile(
r'(?P<key>[a-zA-Z_-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)')
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-STREAM-INF:'):
last_info = {}
for m in kv_rex.finditer(line):
v = m.group('val')
if v.startswith('"'):
v = v[1:-1]
last_info[m.group('key')] = v
elif line.startswith('#EXT-X-MEDIA:'):
last_media = {}
for m in kv_rex.finditer(line):
v = m.group('val')
if v.startswith('"'):
v = v[1:-1]
last_media[m.group('key')] = v
elif line.startswith('#') or not line.strip():
continue
else:
if last_info is None:
formats.append({'url': format_url(line)})
continue
tbr = int_or_none(last_info.get('BANDWIDTH'), scale=1000)
format_id = []
if m3u8_id:
format_id.append(m3u8_id)
last_media_name = last_media.get('NAME') if last_media and last_media.get('TYPE') != 'SUBTITLES' else None
format_id.append(last_media_name if last_media_name else '%d' % (tbr if tbr else len(formats)))
f = {
'format_id': '-'.join(format_id),
'url': format_url(line.strip()),
'tbr': tbr,
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}
codecs = last_info.get('CODECS')
if codecs:
                    # TODO: the video codec does not always necessarily come first
va_codecs = codecs.split(',')
if va_codecs[0]:
f['vcodec'] = va_codecs[0].partition('.')[0]
if len(va_codecs) > 1 and va_codecs[1]:
f['acodec'] = va_codecs[1].partition('.')[0]
resolution = last_info.get('RESOLUTION')
if resolution:
width_str, height_str = resolution.split('x')
f['width'] = int(width_str)
f['height'] = int(height_str)
if last_media is not None:
f['m3u8_media'] = last_media
last_media = None
formats.append(f)
last_info = {}
self._sort_formats(formats)
return formats
@staticmethod
def _xpath_ns(path, namespace=None):
if not namespace:
return path
out = []
for c in path.split('/'):
if not c or c == '.':
out.append(c)
else:
out.append('{%s}%s' % (namespace, c))
return '/'.join(out)
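    # Illustrative example (the namespace URI is hypothetical):
    #   _xpath_ns('./head/meta', 'urn:example')
    # yields './{urn:example}head/{urn:example}meta', i.e. every path component other
    # than '.' is qualified with the given namespace.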
def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal)
if smil is False:
assert not fatal
return []
namespace = self._parse_smil_namespace(smil)
return self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal)
if smil is False:
return {}
return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
def _download_smil(self, smil_url, video_id, fatal=True):
return self._download_xml(
smil_url, video_id, 'Downloading SMIL file',
'Unable to download SMIL file', fatal=fatal)
def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
namespace = self._parse_smil_namespace(smil)
formats = self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
subtitles = self._parse_smil_subtitles(smil, namespace=namespace)
video_id = os.path.splitext(url_basename(smil_url))[0]
title = None
description = None
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
name = meta.attrib.get('name')
content = meta.attrib.get('content')
if not name or not content:
continue
if not title and name == 'title':
title = content
elif not description and name in ('description', 'abstract'):
description = content
return {
'id': video_id,
'title': title or video_id,
'description': description,
'formats': formats,
'subtitles': subtitles,
}
def _parse_smil_namespace(self, smil):
return self._search_regex(
r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
base = smil_url
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
b = meta.get('base') or meta.get('httpBase')
if b:
base = b
break
formats = []
rtmp_count = 0
http_count = 0
videos = smil.findall(self._xpath_ns('.//video', namespace))
for video in videos:
src = video.get('src')
if not src:
continue
bitrate = int_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
filesize = int_or_none(video.get('size') or video.get('fileSize'))
width = int_or_none(video.get('width'))
height = int_or_none(video.get('height'))
proto = video.get('proto')
ext = video.get('ext')
src_ext = determine_ext(src)
streamer = video.get('streamer') or base
if proto == 'rtmp' or streamer.startswith('rtmp'):
rtmp_count += 1
formats.append({
'url': streamer,
'play_path': src,
'ext': 'flv',
'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
if transform_rtmp_url:
streamer, src = transform_rtmp_url(streamer, src)
formats[-1].update({
'url': streamer,
'play_path': src,
})
continue
src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
if proto == 'm3u8' or src_ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
src_url, video_id, ext or 'mp4', m3u8_id='hls'))
continue
if src_ext == 'f4m':
f4m_url = src_url
if not f4m_params:
f4m_params = {
'hdcore': '3.2.0',
'plugin': 'flowplayer-3.2.0.1',
}
f4m_url += '&' if '?' in f4m_url else '?'
f4m_url += compat_urllib_parse.urlencode(f4m_params)
formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds'))
continue
if src_url.startswith('http'):
http_count += 1
formats.append({
'url': src_url,
'ext': ext or src_ext or 'flv',
'format_id': 'http-%d' % (bitrate or http_count),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
continue
self._sort_formats(formats)
return formats
def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
subtitles = {}
for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
src = textstream.get('src')
if not src:
continue
ext = textstream.get('ext') or determine_ext(src)
if not ext:
type_ = textstream.get('type')
SUBTITLES_TYPES = {
'text/vtt': 'vtt',
'text/srt': 'srt',
'application/smptett+xml': 'tt',
}
if type_ in SUBTITLES_TYPES:
ext = SUBTITLES_TYPES[type_]
lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
subtitles.setdefault(lang, []).append({
'url': src,
'ext': ext,
})
return subtitles
def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True):
xspf = self._download_xml(
playlist_url, playlist_id, 'Downloading xpsf playlist',
'Unable to download xspf manifest', fatal=fatal)
if xspf is False:
return []
return self._parse_xspf(xspf, playlist_id)
def _parse_xspf(self, playlist, playlist_id):
NS_MAP = {
'xspf': 'http://xspf.org/ns/0/',
's1': 'http://static.streamone.nl/player/ns/0',
}
entries = []
for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
title = xpath_text(
track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
description = xpath_text(
track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
thumbnail = xpath_text(
track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
duration = float_or_none(
xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
formats = [{
'url': location.text,
'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
} for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
self._sort_formats(formats)
entries.append({
'id': playlist_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
})
return entries
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime("%Y-%m-%d %H:%M")
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
res = int_or_none(v, **kwargs)
if 'get_attr' in kwargs:
print(getattr(v, kwargs['get_attr']))
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
res = float_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _set_cookie(self, domain, name, value, expire_time=None):
cookie = compat_cookiejar.Cookie(
0, name, value, None, None, domain, None,
None, '/', True, False, expire_time, '', None, None, None)
self._downloader.cookiejar.set_cookie(cookie)
def _get_cookies(self, url):
""" Return a compat_cookies.SimpleCookie with the cookies for the url """
req = compat_urllib_request.Request(url)
self._downloader.cookiejar.add_cookie_header(req)
return compat_cookies.SimpleCookie(req.get_header('Cookie'))
def get_testcases(self, include_onlymatching=False):
t = getattr(self, '_TEST', None)
if t:
assert not hasattr(self, '_TESTS'), \
'%s has _TEST and _TESTS' % type(self).__name__
tests = [t]
else:
tests = getattr(self, '_TESTS', [])
for t in tests:
if not include_onlymatching and t.get('only_matching', False):
continue
t['name'] = type(self).__name__[:-len('IE')]
yield t
def is_suitable(self, age_limit):
""" Test whether the extractor is generally suitable for the given
age limit (i.e. pornographic sites are not, all others usually are) """
any_restricted = False
for tc in self.get_testcases(include_onlymatching=False):
if 'playlist' in tc:
tc = tc['playlist'][0]
is_restricted = age_restricted(
tc.get('info_dict', {}).get('age_limit'), age_limit)
if not is_restricted:
return True
any_restricted = any_restricted or is_restricted
return not any_restricted
def extract_subtitles(self, *args, **kwargs):
if (self._downloader.params.get('writesubtitles', False) or
self._downloader.params.get('listsubtitles')):
return self._get_subtitles(*args, **kwargs)
return {}
def _get_subtitles(self, *args, **kwargs):
raise NotImplementedError("This method must be implemented by subclasses")
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
""" Merge subtitle items for one language. Items with duplicated URLs
will be dropped. """
list1_urls = set([item['url'] for item in subtitle_list1])
ret = list(subtitle_list1)
ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
return ret
@classmethod
def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
""" Merge two subtitle dictionaries, language by language. """
ret = dict(subtitle_dict1)
for lang in subtitle_dict2:
ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
return ret
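    # Illustrative example (made-up values): merging
    #   {'en': [{'url': 'a.vtt'}]}
    # with
    #   {'en': [{'url': 'a.vtt'}, {'url': 'b.vtt'}], 'de': [{'url': 'c.vtt'}]}
    # yields {'en': [{'url': 'a.vtt'}, {'url': 'b.vtt'}], 'de': [{'url': 'c.vtt'}]};
    # entries from the second dict whose URL already appears for that language are dropped.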
def extract_automatic_captions(self, *args, **kwargs):
if (self._downloader.params.get('writeautomaticsub', False) or
self._downloader.params.get('listsubtitles')):
return self._get_automatic_captions(*args, **kwargs)
return {}
def _get_automatic_captions(self, *args, **kwargs):
raise NotImplementedError("This method must be implemented by subclasses")
class SearchInfoExtractor(InfoExtractor):
"""
Base class for paged search queries extractors.
They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
Instances should define _SEARCH_KEY and _MAX_RESULTS.
"""
@classmethod
def _make_valid_url(cls):
return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
@classmethod
def suitable(cls, url):
return re.match(cls._make_valid_url(), url) is not None
def _real_extract(self, query):
mobj = re.match(self._make_valid_url(), query)
if mobj is None:
raise ExtractorError('Invalid search query "%s"' % query)
prefix = mobj.group('prefix')
query = mobj.group('query')
if prefix == '':
return self._get_n_results(query, 1)
elif prefix == 'all':
return self._get_n_results(query, self._MAX_RESULTS)
else:
n = int(prefix)
if n <= 0:
raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
elif n > self._MAX_RESULTS:
self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
n = self._MAX_RESULTS
return self._get_n_results(query, n)
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
raise NotImplementedError("This method must be implemented by subclasses")
@property
def SEARCH_KEY(self):
return self._SEARCH_KEY
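# Minimal illustrative sketch (not part of the original file): a hypothetical search
# extractor built on SearchInfoExtractor. Names such as ExampleSearchIE and the
# example.com URL are invented; a real subclass would implement _get_n_results by
# querying the site and returning a playlist of url_result() entries.
class ExampleSearchIE(SearchInfoExtractor):
    IE_NAME = 'example:search'
    _SEARCH_KEY = 'examplesearch'   # enables queries like 'examplesearch5:some query'
    _MAX_RESULTS = 50

    def _get_n_results(self, query, n):
        # Hypothetical result URLs; a real extractor would scrape or call an API here.
        entries = [
            self.url_result('http://example.com/watch?q=%s&rank=%d' % (query, i))
            for i in range(n)
        ]
        return self.playlist_result(entries, playlist_title=query)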
| eric-stanley/youtube-dl | youtube_dl/extractor/common.py | Python | unlicense | 57,102 | ["VisIt"] | f647f7484cb2e385c283bbae02301f20e5cda8dab9447dd68f0dbc7a4b377bb6 |
#!/usr/bin/env python3
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Verifies that all source files contain the necessary copyright boilerplate
# snippet.
import argparse
import datetime
import glob
import os
import re
import sys
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"filenames",
help="list of files to check, all files if unspecified",
nargs='*')
rootdir = os.path.abspath('.')
parser.add_argument("--rootdir",
default=rootdir,
help="root directory to examine")
default_boilerplate_dir = os.path.join(rootdir, "verify/boilerplate")
parser.add_argument("--boilerplate-dir", default=default_boilerplate_dir)
parser.add_argument(
'--skip',
default=[
'external/bazel_tools',
'.git',
'node_modules',
'_output',
'third_party',
'vendor',
'verify/boilerplate/test',
'verify_boilerplate.py',
],
action='append',
help='Customize paths to avoid',
)
return parser.parse_args()
def get_refs():
refs = {}
template_dir = ARGS.boilerplate_dir
if not os.path.isdir(template_dir):
template_dir = os.path.dirname(template_dir)
for path in glob.glob(os.path.join(template_dir, "boilerplate.*.txt")):
extension = os.path.basename(path).split(".")[1]
# Pass the encoding parameter to avoid ascii decode error for some
# platform.
ref_file = open(path, 'r', encoding='utf-8')
ref = ref_file.read().splitlines()
ref_file.close()
refs[extension] = ref
return refs
# given the file contents, return true if the file appears to be generated
def is_generated(data):
if re.search(r"^// Code generated by .*\. DO NOT EDIT\.$", data, re.MULTILINE):
return True
return False
def file_passes(filename, refs, regexs): # pylint: disable=too-many-locals
try:
# Pass the encoding parameter to avoid ascii decode error for some
# platform.
with open(filename, 'r', encoding='utf-8') as fp:
file_data = fp.read()
except IOError:
return False
if not file_data:
return True # Nothing to copyright in this empty file.
basename = os.path.basename(filename)
extension = file_extension(filename)
if extension != "":
ref = refs[extension]
else:
ref = refs[basename]
ref = ref.copy()
# remove build tags from the top of Go files
if extension == "go":
con = regexs["go_build_constraints"]
(file_data, found) = con.subn("", file_data, 1)
# remove shebang from the top of shell files
if extension in ("sh", "py"):
she = regexs["shebang"]
(file_data, found) = she.subn("", file_data, 1)
data = file_data.splitlines()
# if our test file is smaller than the reference it surely fails!
if len(ref) > len(data):
return False
# trim our file to the same number of lines as the reference file
data = data[:len(ref)]
# check if we encounter a 'YEAR' placeholder if the file is generated
if is_generated(file_data):
for i, line in enumerate(data):
if "Copyright YEAR" in line:
return False
return True
year = regexs["year"]
for datum in data:
if year.search(datum):
return False
    # Replace any year between 2014 and the current year with "YEAR"
    # (only the first line containing a year is rewritten)
when = regexs["date"]
for idx, datum in enumerate(data):
(data[idx], found) = when.subn('YEAR', datum)
if found != 0:
break
# if we don't match the reference at this point, fail
if ref != data:
return False
return True
def file_extension(filename):
return os.path.splitext(filename)[1].split(".")[-1].lower()
# even when generated by bazel we will complain about some generated files
# not having the headers. since they're just generated, ignore them
IGNORE_HEADERS = ['// Code generated by go-bindata.']
def has_ignored_header(pathname):
# Pass the encoding parameter to avoid ascii decode error for some
# platform.
with open(pathname, 'r', encoding='utf-8') as myfile:
data = myfile.read()
for header in IGNORE_HEADERS:
if data.startswith(header):
return True
return False
def normalize_files(files):
newfiles = []
for pathname in files:
if any(x in pathname for x in ARGS.skip):
continue
newfiles.append(pathname)
for idx, pathname in enumerate(newfiles):
if not os.path.isabs(pathname):
newfiles[idx] = os.path.join(ARGS.rootdir, pathname)
return newfiles
def get_files(extensions):
files = []
if ARGS.filenames:
files = ARGS.filenames
else:
for root, dirs, walkfiles in os.walk(ARGS.rootdir):
# don't visit certain dirs. This is just a performance improvement
# as we would prune these later in normalize_files(). But doing it
# cuts down the amount of filesystem walking we do and cuts down
# the size of the file list
for dpath in ARGS.skip:
if dpath in dirs:
dirs.remove(dpath)
for name in walkfiles:
pathname = os.path.join(root, name)
files.append(pathname)
files = normalize_files(files)
outfiles = []
for pathname in files:
basename = os.path.basename(pathname)
extension = file_extension(pathname)
if extension in extensions or basename in extensions:
if not has_ignored_header(pathname):
outfiles.append(pathname)
return outfiles
def get_dates():
years = datetime.datetime.now().year
return '(%s)' % '|'.join((str(year) for year in range(2014, years + 1)))
def get_regexs():
regexs = {}
# Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
regexs["year"] = re.compile('YEAR')
# dates can be any year between 2014 and the current year, company holder names can be anything
regexs["date"] = re.compile(get_dates())
# strip // +build \n\n build constraints
regexs["go_build_constraints"] = re.compile(r"^(//( \+build|go:build).*\n)+\n",
re.MULTILINE)
# strip #!.* from shell/python scripts
regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
return regexs
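# Illustrative example (the year is chosen arbitrarily): if run in 2021, get_dates()
# yields '(2014|2015|2016|2017|2018|2019|2020|2021)', so a header line such as
#   "Copyright 2015 The Kubernetes Authors."
# is rewritten by file_passes() to "Copyright YEAR The Kubernetes Authors." before
# being compared against the reference boilerplate.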
def nonconforming_lines(files):
yield '%d files have incorrect boilerplate headers:' % len(files)
for fp in files:
yield os.path.relpath(fp, ARGS.rootdir)
def main():
regexs = get_regexs()
refs = get_refs()
filenames = get_files(refs.keys())
nonconforming_files = []
for filename in sorted(filenames):
if not file_passes(filename, refs, regexs):
nonconforming_files.append(filename)
if nonconforming_files:
for line in nonconforming_lines(nonconforming_files):
print(line)
sys.exit(1)
if __name__ == "__main__":
ARGS = get_args()
main()
| kubernetes/repo-infra | hack/verify_boilerplate.py | Python | apache-2.0 | 7,790 | ["VisIt"] | 6928a807e2ffc2e6902de705ce7a7cddbbbf5ef61a43901432c4f4551b4e9a7b |
from numpy import pi, sqrt, sin, cos, tan
#
# Based on "Redfearn's" formula from the The GDA Technical Manual
# http://www.icsm.gov.au/gda/gdatm/
#
# some ellipsoids
# Semi major axis Inverse flattening Central Scale factor
# GRS80 6378137.0 298.257222101 0.9996
# WGS84 6378137.0 298.257223563 0.9996
#
class geodetic:
CSF = 0.9996; #central scale factor
FalseEasting = 500000.0;
FalseNorthing = 10000000.0; # for southern hemisphere
ZoneWidth = 6.0 #in degrees
# Longitude of initial central meridian (Zone one)
CMZ1 = -177.0;
BAND_LOOKUP = 'CDEFGHJKLMNPQRSTUVWXX'
def __init__(self, a=6378137.0, f_i=298.257222101):
# semi-major axis
self.a = a
# flattening
self.f = 1.0/f_i;
# eccentricity squared
self.e2 = (2.0 - self.f) * self.f;
def computeZoneAndBand(self, lat, lon):
if (lat < -80.0):
if (lon < 0.0):
band = 'A'
else:
band = 'B'
elif (lat > 84.0):
if (lon < 0.0):
band = 'Y'
else:
band = 'Z'
else:
zone = int((lon-self.CMZ1+self.ZoneWidth/2.0)/self.ZoneWidth) + 1;
            band = self.BAND_LOOKUP[int((lat+80)/8)]  # integer band index (works under Python 2 and 3)
return (zone, band)
def geoToGrid(self, lat_d, lon_d, zone, band):
lat = lat_d*pi/180.0;
# Meridian distance
e2 = self.e2
e4 = e2*e2;
e6 = e4*e2;
A0 = 1-(e2/4.0)-(3.0*e4/64.0)-(5.0*e6/256.0);
A2 = (3.0/8.0)*(e2+e4/4.0+15.0*e6/128.0);
A4 = (15.0/256.0)*(e4+3.0*e6/4.0);
A6 = 35.0*e6/3072.0;
s = sin(lat);
s2 = sin(2.0*lat);
s4 = sin(4.0*lat);
s6 = sin(6.0*lat);
m = self.a*(A0*lat-A2*s2+A4*s4-A6*s6);
# Radii of curvature.
rho = self.a*(1-e2)/((1.0-e2*s*s)**(3.0/2.0));
nu = self.a/sqrt(1-e2*s*s);
psi = nu / rho;
psi2 = psi*psi;
psi3 = psi*psi2;
psi4 = psi*psi3;
# Geographical to Grid
# longitude of central meridian of zone (degrees)
self.LongCMZ = (zone - 1) * self.ZoneWidth + self.CMZ1;
# the arc distance from central meridian (radians)
w = (lon_d - self.LongCMZ)*pi/180.0;
w2 = w*w;
w3 = w*w2;
w4 = w*w3;
w5 = w*w4;
w6 = w*w5;
w7 = w*w6;
w8 = w*w7;
c = cos(lat);
c3 = c*c*c;
c5 = c*c*c3;
c7 = c*c*c5;
t = tan(lat);
t2 = t*t;
t4 = t2*t2;
t6 = t2*t4;
# Northing
term1 = w2*c/2.0;
term2 = w4*c3*(4.0*psi2+psi-t2)/24.0;
term3 = w6*c5*(8.0*psi4*(11.0-24.0*t2)-28*psi3*(1-6.0*t2)+psi2*(1-32*t2)-psi*(2.0*t2)+t4)/720.0;
term4 = w8*c7*(1385.0-3111.0*t2+543.0*t4-t6)/40320.0;
northing = self.CSF*(m+nu*s*(term1+term2+term3+term4));
if (band < 'N'):
northing += self.FalseNorthing
# Easting
term1 = w*c;
term2 = w3*c3*(psi-t2)/6.0;
term3 = w5*c5*(4.0*psi3*(1.0-6.0*t2)+psi2*(1.0+8.0*t2)-psi*(2.0*t2)+t4)/120.0;
term4 = w7*c7*(61.0-479.0*t2+179.0*t4-t6)/5040.0;
easting = nu*self.CSF*(term1+term2+term3+term4) + self.FalseEasting;
return (northing, easting)
def gridToGeo(self, northing, easting, zone, band):
E_ = easting - self.FalseEasting
N_ = northing;
if (band < 'N'):
N_ -= self.FalseNorthing
m = N_/self.CSF
# Foot-point Latitude
n = self.f/(2.0-self.f)
n2 = n*n
n3 = n2*n
n4 = n2*n2
G = self.a*(1.-n)*(1.-n2)*(1.+(9./4.)*n2+(225./64.)*n4)*(pi/180.)
sigma = (m*pi)/(180*G)
phi_ = sigma +\
((3.*n/2.)-(27.*n3/32.))*sin(2.*sigma) + \
((21.*n2/16.)-(55.*n4/32.))*sin(4.*sigma) +\
(151.*n3/96.)*sin(6.*sigma) +\
(1097.*n4/512.)*sin(8.*sigma);
# Radii of curvature. (using foot point latitude)
s_ = sin(phi_)
e2 = self.e2
rho_ = self.a*(1-e2)/((1.0-e2*s_*s_)**(3.0/2.0))
nu_ = self.a/sqrt(1-e2*s_*s_)
psi_ = nu_ / rho_
psi2_ = psi_*psi_
psi3_ = psi2_*psi_
psi4_ = psi2_*psi2_
x = E_/(self.CSF*nu_)
x3 = x*x*x
x5 = x3*x*x
x7 = x5*x*x
t_ = tan(phi_)
t2_ = t_*t_;
t4_ = t2_*t2_;
t6_ = t2_*t4_;
tkr_ = (t_/(self.CSF*rho_))
term1 = tkr_*(x*E_/2.)
term2 = tkr_*(E_*x3/24.)*(-4.*psi2_ + 9.*psi_*(1.-t2_) + 12.*t2_)
term3 = tkr_*(E_*x5/720.)*(8.*psi4_*(11.-24.*t2_)\
- 12.*psi3_*(21.-71.*t2_)\
+ 15.*psi2_*(15.-98.*t2_+15.*t4_)\
+ 180.*psi_*(5.*t2_-3.*t4_)\
+ 360.*t4_)
term4 = tkr_*(E_*x7/40320.)*(1385. + 3633.*t2_ + 4095.*t4_ + 1575.*t6_)
phi = phi_ - term1 +term2 - term3 + term4
lat = phi*180/pi
sec_phi_ = 1.0/cos(phi_)
term1 = x * sec_phi_
term2 = (x3/6.)*sec_phi_*(psi_ + 2.*t2_)
term3 = (x5/120.)*sec_phi_*(-4.*psi3_*(1.0-6*t2_) + psi2_*(9.-68.*t2_)\
+ 72.*psi_*t2_ + 24.*t4_)
term4 = (x7/5040.)*sec_phi_*(61. + 662.*t2_ + 1320.*t4_ + 720.*t6_)
w = term1 - term2 + term3 - term4
lambda0 = (self.CMZ1+(zone-1)*self.ZoneWidth)*pi/180.0;
_lambda = lambda0 + w
lon = _lambda*180.0/pi
return (lat, lon)
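# Illustrative usage sketch (not part of the original module; the coordinate is an
# arbitrary example near Canberra): convert a latitude/longitude to grid coordinates
# and back again -- the round trip should reproduce the input closely.
if __name__ == '__main__':
    g = geodetic()                                    # GRS80 ellipsoid by default
    zone, band = g.computeZoneAndBand(-35.3, 149.1)
    northing, easting = g.geoToGrid(-35.3, 149.1, zone, band)
    lat, lon = g.gridToGeo(northing, easting, zone, band)
    print("zone %s band %s, round trip lat/lon: %.6f %.6f" % (zone, band, lat, lon))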
| CanberraUAV/cuav | cuav/uav/geo.py | Python | gpl-3.0 | 4,936 | ["Psi4"] | c8dc544a3b8ff99cc5ae3a6c0845ab688f1083a711ff81cba105f95f03958914 |
from pycp2k.inputsection import InputSection
from ._each255 import _each255
class _wannier901(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Section_parameters = None
self.Add_last = None
self.Common_iteration_levels = None
self.Filename = None
self.Log_print_key = None
self.Seed_name = None
self.Mp_grid = None
self.Added_mos = None
self.Exclude_bands = []
self.Wannier_functions = []
self.EACH = _each255()
self._name = "WANNIER90"
self._keywords = {'Log_print_key': 'LOG_PRINT_KEY', 'Add_last': 'ADD_LAST', 'Common_iteration_levels': 'COMMON_ITERATION_LEVELS', 'Mp_grid': 'MP_GRID', 'Filename': 'FILENAME', 'Added_mos': 'ADDED_MOS', 'Seed_name': 'SEED_NAME'}
self._repeated_keywords = {'Wannier_functions': 'WANNIER_FUNCTIONS', 'Exclude_bands': 'EXCLUDE_BANDS'}
self._subsections = {'EACH': 'EACH'}
self._aliases = {'Added_bands': 'Added_mos'}
self._attributes = ['Section_parameters']
@property
def Added_bands(self):
"""
See documentation for Added_mos
"""
return self.Added_mos
@Added_bands.setter
def Added_bands(self, value):
self.Added_mos = value
| SINGROUP/pycp2k | pycp2k/classes/_wannier901.py | Python | lgpl-3.0 | 1,288 | ["Wannier90"] | 95072b25cf5c3b17297b606c95802561fe39cbffb7327c5bdf6a67eb5df4e38f |
# Pizza.py toolkit, www.cs.sandia.gov/~sjplimp/pizza.html
# Steve Plimpton, sjplimp@sandia.gov, Sandia National Laboratories
#
# Copyright (2005) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
# cfg tool
oneline = "Convert LAMMPS snapshots to AtomEye CFG format"
docstr = """
c = cfg(d) d = object containing atom coords (dump, data)
c.one() write all snapshots to tmp.cfg
c.one("new") write all snapshots to new.cfg
c.many() write snapshots to tmp0000.cfg, tmp0001.cfg, etc
c.many("new") write snapshots to new0000.cfg, new0001.cfg, etc
c.single(N) write snapshot for timestep N to tmp.cfg
c.single(N,"file") write snapshot for timestep N to file.cfg
"""
# History
# 11/06, Aidan Thompson (SNL): original version
# ToDo list
# should decide if dump is scaled or not, since CFG prints in scaled coords
# this creates a simple AtomEye CFG format
# there is more complex format we could write out
# which allows for extra atom info, e.g. to do atom coloring on
# how to dump for a triclinic box, since AtomEye accepts this
# Variables
# data = data file to read from
# Imports and external programs
import sys
# Class definition
class cfg:
# --------------------------------------------------------------------
def __init__(self,data):
self.data = data
# --------------------------------------------------------------------
def one(self,*args):
if len(args) == 0: file = "tmp.cfg"
elif args[0][-4:] == ".cfg": file = args[0]
else: file = args[0] + ".cfg"
f = open(file,"w")
n = flag = 0
while 1:
which,time,flag = self.data.iterator(flag)
if flag == -1: break
time,box,atoms,bonds,tris,lines = self.data.viz(which)
xlen = box[3]-box[0]
ylen = box[4]-box[1]
zlen = box[5]-box[2]
print >>f,"Number of particles = %d " % len(atoms)
print >>f,"# Timestep %d \n#\nA = 1.0 Angstrom" % time
print >>f,"H0(1,1) = %20.10f A " % xlen
print >>f,"H0(1,2) = 0.0 A "
print >>f,"H0(1,3) = 0.0 A "
print >>f,"H0(2,1) = 0.0 A "
print >>f,"H0(2,2) = %20.10f A " % ylen
print >>f,"H0(2,3) = 0.0 A "
print >>f,"H0(3,1) = 0.0 A "
print >>f,"H0(3,2) = 0.0 A "
print >>f,"H0(3,3) = %20.10f A " % zlen
print >>f,"#"
for atom in atoms:
itype = int(atom[1])
xfrac = (atom[2]-box[0])/xlen
yfrac = (atom[3]-box[1])/ylen
zfrac = (atom[4]-box[2])/zlen
# print >>f,"1.0 %d %15.10f %15.10f %15.10f %15.10f %15.10f %15.10f " % (itype,xfrac,yfrac,zfrac,atom[5],atom[6],atom[7])
print >>f,"1.0 %d %15.10f %15.10f %15.10f 0.0 0.0 0.0 " % (itype,xfrac,yfrac,zfrac)
print time,
sys.stdout.flush()
n += 1
f.close()
print "\nwrote %d snapshots to %s in CFG format" % (n,file)
# --------------------------------------------------------------------
def many(self,*args):
if len(args) == 0: root = "tmp"
else: root = args[0]
n = flag = 0
while 1:
which,time,flag = self.data.iterator(flag)
if flag == -1: break
time,box,atoms,bonds,tris,lines = self.data.viz(which)
if n < 10:
file = root + "000" + str(n)
elif n < 100:
file = root + "00" + str(n)
elif n < 1000:
file = root + "0" + str(n)
else:
file = root + str(n)
file += ".cfg"
f = open(file,"w")
xlen = box[3]-box[0]
ylen = box[4]-box[1]
zlen = box[5]-box[2]
print >>f,"Number of particles = %d " % len(atoms)
print >>f,"# Timestep %d \n#\nA = 1.0 Angstrom" % time
print >>f,"H0(1,1) = %20.10f A " % xlen
print >>f,"H0(1,2) = 0.0 A "
print >>f,"H0(1,3) = 0.0 A "
print >>f,"H0(2,1) = 0.0 A "
print >>f,"H0(2,2) = %20.10f A " % ylen
print >>f,"H0(2,3) = 0.0 A "
print >>f,"H0(3,1) = 0.0 A "
print >>f,"H0(3,2) = 0.0 A "
print >>f,"H0(3,3) = %20.10f A " % zlen
print >>f,"#"
for atom in atoms:
itype = int(atom[1])
xfrac = (atom[2]-box[0])/xlen
yfrac = (atom[3]-box[1])/ylen
zfrac = (atom[4]-box[2])/zlen
# print >>f,"1.0 %d %15.10f %15.10f %15.10f %15.10f %15.10f %15.10f " % (itype,xfrac,yfrac,zfrac,atom[5],atom[6],atom[7])
print >>f,"1.0 %d %15.10f %15.10f %15.10f 0.0 0.0 0.0 " % (itype,xfrac,yfrac,zfrac)
print time,
sys.stdout.flush()
f.close()
n += 1
print "\nwrote %s snapshots in CFG format" % n
# --------------------------------------------------------------------
def single(self,time,*args):
if len(args) == 0: file = "tmp.cfg"
elif args[0][-4:] == ".cfg": file = args[0]
else: file = args[0] + ".cfg"
which = self.data.findtime(time)
time,box,atoms,bonds,tris,lines = self.data.viz(which)
f = open(file,"w")
xlen = box[3]-box[0]
ylen = box[4]-box[1]
zlen = box[5]-box[2]
print >>f,"Number of particles = %d " % len(atoms)
print >>f,"# Timestep %d \n#\nA = 1.0 Angstrom" % time
print >>f,"H0(1,1) = %20.10f A " % xlen
print >>f,"H0(1,2) = 0.0 A "
print >>f,"H0(1,3) = 0.0 A "
print >>f,"H0(2,1) = 0.0 A "
print >>f,"H0(2,2) = %20.10f A " % ylen
print >>f,"H0(2,3) = 0.0 A "
print >>f,"H0(3,1) = 0.0 A "
print >>f,"H0(3,2) = 0.0 A "
print >>f,"H0(3,3) = %20.10f A " % zlen
print >>f,"#"
for atom in atoms:
itype = int(atom[1])
xfrac = (atom[2]-box[0])/xlen
yfrac = (atom[3]-box[1])/ylen
zfrac = (atom[4]-box[2])/zlen
# print >>f,"1.0 %d %15.10f %15.10f %15.10f %15.10f %15.10f %15.10f " % (itype,xfrac,yfrac,zfrac,atom[5],atom[6],atom[7])
print >>f,"1.0 %d %15.10f %15.10f %15.10f 0.0 0.0 0.0 " % (itype,xfrac,yfrac,zfrac)
f.close()
| quang-ha/lammps | tools/python/pizza/cfg.py | Python | gpl-2.0 | 6,108 | ["LAMMPS"] | 5f42a61b4a92182e1412eb57e76d454fbbe00f6990dc537dea94ebf2c4f77a39 |
import unittest, re, sys
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i
print "FIX!: Avoiding an RF histogram assertion error on some of these datasets"
DO_RF = False
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global localhost
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(node_count=1)
else:
h2o_hosts.build_cloud_with_hosts(node_count=1)
global SYNDATASETS_DIR
SYNDATASETS_DIR = h2o.make_syn_dir()
h2o.beta_features = True
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_many_parse1(self):
rows = self.genrows1()
set = 1
self.tryThemAll(set,rows)
def test_many_parse2(self):
rows = self.genrows2()
set = 2
self.tryThemAll(set,rows)
# this one has problems with blank lines
def test_many_parse3(self):
rows = self.genrows3()
set = 3
self.tryThemAll(set,rows)
def genrows1(self):
# comment has to have # in first column? (no leading whitespace)
# FIX! what about blank fields and spaces as sep
# FIX! temporary need more lines to avoid sample error in H2O
# throw in some variants for leading 0 on the decimal, and scientific notation
rows = [
"# 'comment, is okay",
'# "this comment, is okay too',
"# 'this' comment, is okay too",
"FirstName|MiddleInitials|LastName|DateofBirth",
"0|0.5|1|0",
"3|NaN|4|1",
"6||8|0",
"0.6|0.7|0.8|1",
"+0.6|+0.7|+0.8|0",
"-0.6|-0.7|-0.8|1",
".6|.7|.8|0",
"+.6|+.7|+.8|1",
"-.6|-.7|-.8|0",
"+0.6e0|+0.7e0|+0.8e0|1",
"-0.6e0|-0.7e0|-0.8e0|0",
".6e0|.7e0|.8e0|1",
"+.6e0|+.7e0|+.8e0|0",
"-.6e0|-.7e0|-.8e0|1",
"+0.6e00|+0.7e00|+0.8e00|0",
"-0.6e00|-0.7e00|-0.8e00|1",
".6e00|.7e00|.8e00|0",
"+.6e00|+.7e00|+.8e00|1",
"-.6e00|-.7e00|-.8e00|0",
"+0.6e-01|+0.7e-01|+0.8e-01|1",
"-0.6e-01|-0.7e-01|-0.8e-01|0",
".6e-01|.7e-01|.8e-01|1",
"+.6e-01|+.7e-01|+.8e-01|0",
"-.6e-01|-.7e-01|-.8e-01|1",
"+0.6e+01|+0.7e+01|+0.8e+01|0",
"-0.6e+01|-0.7e+01|-0.8e+01|1",
".6e+01|.7e+01|.8e+01|0",
"+.6e+01|+.7e+01|+.8e+01|1",
"-.6e+01|-.7e+01|-.8e+01|0",
"+0.6e102|+0.7e102|+0.8e102|1",
"-0.6e102|-0.7e102|-0.8e102|0",
".6e102|.7e102|.8e102|1",
"+.6e102|+.7e102|+.8e102|0",
"-.6e102|-.7e102|-.8e102|1",
]
return rows
# "# comment here is okay",
# "# comment here is okay too",
# FIX! needed an extra line to avoid bug on default 67+ sample?
def genrows2(self):
rows = [
"FirstName|MiddleInitials|LastName|DateofBirth",
"Kalyn|A.|Dalton|1967-04-01",
"Gwendolyn|B.|Burton|1947-10-26",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31"
]
return rows
# update spec
# intermixing blank lines in the first two lines breaks things
# blank lines cause all columns except the first to get NA (red)
# first may get a blank string? (not ignored)
def genrows3(self):
rows = [
"# comment here is okay",
"# comment here is okay too",
"FirstName|MiddleInitials|LastName|DateofBirth",
"Kalyn|A.|Dalton|1967-04-01",
"",
"Gwendolyn||Burton|1947-10-26",
"",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
]
return rows
# The 3 supported line-ends
# FIX! should test them within quoted tokens
eolDict = {
0:"\n",
1:"\r\n",
2:"\r"
}
tokenChangeDict = {
0:[' ',' '], # double space
1:[' ',' '], # unbalanced space
        2:[' ',' '], # unbalanced space3
}
def changeTokens(self,rows,tokenCase):
[cOpen,cClose] = self.tokenChangeDict[tokenCase]
newRows = []
for r in rows:
# don't quote lines that start with #
# can quote lines start with some spaces or tabs? maybe
comment = re.match(r'^[ \t]*#', r)
empty = re.match(r'^$',r)
if not (comment or empty):
r = re.sub('^',cOpen,r)
r = re.sub('\|',cClose + '|' + cOpen,r)
r = re.sub('$',cClose,r)
h2o.verboseprint(r)
newRows.append(r)
return newRows
def writeRows(self,csvPathname,rows,eol):
f = open(csvPathname, 'w')
for r in rows:
            f.write(r + eol)
        f.close()
        # what about case of missing eol at end of file?
sepChangeDict = {
# NEW: Hive datasets use 0x01 hex char as SEP, so now legal in our parser spec
0:"",
1:" ", # double space
2:" ",
3:",",
4:"\t",
}
def changeSep(self,rows,sepCase):
# do a trial replace, to see if we get a <tab><sp> problem
# comments at the beginning..get a good row
r = rows[-1]
tabseptab = re.search(r'\t|\t', r)
spsepsp = re.search(r' | ', r)
# NOTE: we don't care about this because we don't have quoted strings
# in this test. we do care in the other parse_many_cases test
if 1==1:
newSep = self.sepChangeDict[sepCase]
else:
if tabseptab or spsepsp:
# use comma instead. always works
# print "Avoided"
newSep = ","
else:
newSep = self.sepChangeDict[sepCase]
newRows = [r.replace('|',newSep) for r in rows]
return newRows
def tryThemAll(self,set,rows):
for eolCase in range(len(self.eolDict)):
eol = self.eolDict[eolCase]
# change tokens must be first
for tokenCase in range(len(self.tokenChangeDict)):
newRows1 = self.changeTokens(rows,tokenCase)
for sepCase in range(len(self.sepChangeDict)):
newRows2 = self.changeSep(newRows1,sepCase)
csvPathname = SYNDATASETS_DIR + '/parsetmp_' + \
str(set) + "_" + \
str(eolCase) + "_" + \
str(tokenCase) + "_" + \
str(sepCase) + \
'.data'
self.writeRows(csvPathname,newRows2,eol)
if "'" in self.tokenChangeDict[tokenCase][0]:
single_quotes = 1
else:
single_quotes = 0
parseResult = h2i.import_parse(path=csvPathname, schema='put', single_quotes=single_quotes,
noPrint=not h2o.verbose)
if DO_RF:
h2o_cmd.runRF(parseResult=parseResult, trees=1, timeoutSecs=30, retryDelaySecs=0.1)
h2o.verboseprint("Set", set)
sys.stdout.write('.')
sys.stdout.flush()
if __name__ == '__main__':
h2o.unit_main()
| woobe/h2o | py/testdir_single_jvm/test_parse_many_doublesp_fvec.py | Python | apache-2.0 | 7,703 | ["Dalton"] | f20ad86b1f81dc30783292774942a46630b25225efa6d7053466381ec8e11639 |
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from textwrap import dedent
from pants.backend.docgen.targets.doc import Page, Wiki, WikiArtifact
from pants.base.build_environment import get_buildroot
from pants.build_graph.address import Address
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.testutil.test_base import TestBase
class WikiPageTest(TestBase):
@classmethod
def alias_groups(cls):
return BuildFileAliases(
targets={"page": Page},
objects={
"Wiki": Wiki,
"wiki_artifact": WikiArtifact,
"confluence": Wiki(name="confluence_wiki", url_builder=None),
},
)
def setUp(self):
super().setUp()
self.add_to_build_file(
"src/docs",
dedent(
"""
page(name='readme',
source='README.md',
links=[':readme2'],
provides=[
wiki_artifact(
wiki=confluence,
space='~areitz',
title='test_page',
),
],
)
page(name='readme2',
sources=['README2.md'],
links=[':readme'],
provides=[
wiki_artifact(
wiki=confluence,
space='~areitz',
title='test_page2',
),
],
)
"""
),
)
self.create_file(
"src/docs/README.md",
contents=dedent(
"""
some text
* [[Link to the other readme file|pants('src/docs:readme2')]]
some text
* [[Link AGAIN to the other readme file|pants('src/docs:readme2')]]
"""
),
)
self.create_file(
"src/docs/README2.md",
contents=dedent(
"""
This is the second readme file! Isn't it exciting?
[[link to the first readme file|pants('src/docs:readme')]]
"""
),
)
def test_wiki_page(self):
p = self.target("src/docs:readme")
self.assertIsInstance(p, Page)
self.assertIsInstance(p.provides[0], WikiArtifact)
self.assertIsInstance(p.provides[0].wiki, Wiki)
self.assertTrue(isinstance(p, Page), f"{p} isn't an instance of Page")
self.assertTrue(
isinstance(p.provides[0], WikiArtifact), f"{p} isn't an instance of WikiArtifact"
)
self.assertTrue(isinstance(p.provides[0].wiki, Wiki), f"{p} isn't an instance of Wiki")
self.assertEqual("~areitz", p.provides[0].config["space"])
self.assertEqual("test_page", p.provides[0].config["title"])
self.assertFalse("parent" in p.provides[0].config)
# Check to make sure the 'readme2' target has been loaded into the build graph (via parsing of
# the 'README.md' page)
address = Address.parse("src/docs:readme2", relative_to=get_buildroot())
self.assertEqual(p._build_graph.get_target(address), self.target("src/docs:readme2"))
def test_wiki_page_fingerprinting(self):
def create_page_target(space):
self.reset_build_graph()
self.create_file("src/docs/BUILD")
self.add_to_build_file(
"src/docs",
dedent(
"""
page(name='readme3',
source='README.md',
provides=[
wiki_artifact(
wiki=confluence,
space='~"""
+ space
+ """',
title='test_page3',
),
],
)
"""
),
)
return self.target("src/docs:readme3")
fingerprint_before = create_page_target("space1").payload.fingerprint()
self.assertEqual(fingerprint_before, create_page_target("space1").payload.fingerprint())
self.assertNotEqual(fingerprint_before, create_page_target("space2").payload.fingerprint())
def test_no_sources(self):
self.add_to_build_file("", "page(name='page', sources=[])")
with self.assertRaisesRegex(AddressLookupError, r"//:page.*exactly 1 source, but found 0"):
self.target(":page")
def test_multiple_sources(self):
self.create_files("", ["exists.md", "also-exists.md"])
self.add_to_build_file("", "page(name='page', sources=['exists.md', 'also-exists.md'])")
with self.assertRaisesRegex(AddressLookupError, r"//:page.*exactly 1 source, but found 2"):
self.target(":page")
def test_source_and_sources(self):
self.create_files("", ["exists.md", "also-exists.md"])
self.add_to_build_file(
"", "page(name='page', source=['exists.md'], sources=['also-exists.md'])",
)
with self.assertRaisesRegex(
AddressLookupError, r"//:page: Cannot specify both source and sources attribute"
):
self.target(":page")
| tdyas/pants | tests/python/pants_test/backend/docgen/targets/test_wiki_page.py | Python | apache-2.0 | 5,528 | ["exciting"] | cb822249a8165903a2a34e0641f691b8be7076353e1d83cb484379c5f625e809 |
#!/usr/bin/env python3
# @package adjust_timeline
# \author Andy Aschwanden, University of Alaska Fairbanks, USA
# \brief Script adjusts a time axis of a file.
# \details Script adjusts the time axis of a file.
# Say you have monthly climate forcing from 1980-1-1 through 2001-1-1 in
# the forcing file foo_1980-1999.nc to be used with, e.g. -surface_given_file,
# but you want the model to run from 1991-1-1 through 2001-1-1.
#
# Usage:
#
# \verbatim $ adjust_timeline.py --start_date '1991-1-1'
# time_1991-2000.nc \endverbatim
import os
import sys
from argparse import ArgumentParser
from dateutil import rrule
from dateutil.parser import parse
from datetime import datetime
import time
import numpy as np
try:
import netCDF4 as netCDF
except:
print("netCDF4 is not installed!")
sys.exit(1)
NC = netCDF.Dataset
import cftime
# Set up the option parser
parser = ArgumentParser()
parser.description = """Script adjusts the time file with time and time
bounds that can be used to determine to force PISM via command line
option -time_file or adjust the time axis for postprocessing."""
parser.add_argument("FILE", nargs="*")
parser.add_argument(
"-p",
"--periodicity",
dest="periodicity",
help="""periodicity, e.g. monthly, daily, etc. Default=monthly""",
default="monthly",
)
parser.add_argument(
"-a",
"--start_date",
dest="start_date",
help="""Start date in ISO format. Default=1989-1-1""",
default="1989-1-1",
)
parser.add_argument(
"-c",
"--calendar",
dest="calendar",
choices=["standard", "gregorian", "no_leap", "365_day", "360_day", "julian"],
help="""Sets the calendar. Default="standard".""",
default="standard",
)
parser.add_argument(
"-i",
"--interval_type",
dest="interval_type",
choices=["start", "mid", "end"],
help="""Defines whether the time values t_k are the end points of the time bounds tb_k or the mid points 1/2*(tb_k -tb_(k-1)). Default="mid".""",
default="mid",
)
parser.add_argument(
"-u",
"--ref_unit",
dest="ref_unit",
help="""Reference unit. Default=days. Use of months or
years is NOT recommended.""",
default="days",
)
parser.add_argument(
"-d",
"--ref_date",
dest="ref_date",
help="""Reference date. Default=1960-1-1""",
default="1960-1-1",
)
options = parser.parse_args()
interval_type = options.interval_type
periodicity = options.periodicity.upper()
start_date = parse(options.start_date)
ref_unit = options.ref_unit
ref_date = options.ref_date
args = options.FILE
infile = args[0]
time1 = time.time()
nc = NC(infile, "a")
nt = len(nc.variables["time"])
time_units = "%s since %s" % (ref_unit, ref_date)
calendar = options.calendar
# create a dictionary so that we can supply the periodicity as a
# command-line argument.
pdict = {}
pdict["SECONDLY"] = rrule.SECONDLY
pdict["MINUTELY"] = rrule.MINUTELY
pdict["HOURLY"] = rrule.HOURLY
pdict["DAILY"] = rrule.DAILY
pdict["WEEKLY"] = rrule.WEEKLY
pdict["MONTHLY"] = rrule.MONTHLY
pdict["YEARLY"] = rrule.YEARLY
prule = pdict[periodicity]
# reference date from command-line argument
r = time_units.split(" ")[2].split("-")
refdate = datetime(int(r[0]), int(r[1]), int(r[2]))
# create list with dates from start_date for nt counts
# periodicity prule.
bnds_datelist = list(rrule.rrule(prule, dtstart=start_date, count=nt + 1))
# calculate the days since refdate, including refdate, for every interval bound;
# the time values themselves are placed within each interval according to interval_type
bnds_interval_since_refdate = cftime.date2num(bnds_datelist, time_units, calendar=calendar)
if interval_type == "mid":
# mid-point value:
# time[n] = (bnds[n] + bnds[n+1]) / 2
time_interval_since_refdate = bnds_interval_since_refdate[0:-1] + np.diff(bnds_interval_since_refdate) / 2
elif interval_type == "start":
time_interval_since_refdate = bnds_interval_since_refdate[:-1]
else:
time_interval_since_refdate = bnds_interval_since_refdate[1:]
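# Illustrative example (values not computed here): with --periodicity monthly and
# --start_date 1991-1-1 the bounds list is [1991-01-01, 1991-02-01, 1991-03-01, ...]
# converted to days since the reference date; "mid" places each time value halfway
# between consecutive bounds, "start" uses the left bound and "end" the right bound.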
# create a new dimension for bounds only if it does not yet exist
time_dim = "time"
if time_dim not in list(nc.dimensions.keys()):
nc.createDimension(time_dim)
# create a new dimension for bounds only if it does not yet exist
bnds_dim = "nb2"
if bnds_dim not in list(nc.dimensions.keys()):
nc.createDimension(bnds_dim, 2)
# variable names consistent with PISM
time_var_name = "time"
bnds_var_name = "time_bnds"
# create time variable
if time_var_name not in nc.variables:
time_var = nc.createVariable(time_var_name, "d", dimensions=(time_dim))
else:
time_var = nc.variables[time_var_name]
time_var[:] = time_interval_since_refdate
time_var.bounds = bnds_var_name
time_var.units = time_units
time_var.calendar = calendar
time_var.standard_name = time_var_name
time_var.axis = "T"
# create time bounds variable
if bnds_var_name not in nc.variables:
time_bnds_var = nc.createVariable(bnds_var_name, "d", dimensions=(time_dim, bnds_dim))
else:
time_bnds_var = nc.variables[bnds_var_name]
time_bnds_var[:, 0] = bnds_interval_since_refdate[0:-1]
time_bnds_var[:, 1] = bnds_interval_since_refdate[1::]
# writing global attributes
script_command = " ".join([time.ctime(), ":", __file__.split("/")[-1], " ".join([str(x) for x in args])])
nc.history = script_command
nc.Conventions = "CF 1.5"
nc.close()
time2 = time.time()
print("adjust_timeline.py took {:2.1f}s".format(time2 - time1))
| pism/pism | util/adjust_timeline.py | Python | gpl-3.0 | 5,319 | ["NetCDF"] | 6600c5896f8cbeb00c6ac9d3dcdaa3d4eae7b9d23f2a241808706d135d6a8a5d |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 'True', 1, True]
BOOLEANS_FALSE = ['no', 'off', '0', 'false', 'False', 0, False]
BOOLEANS = BOOLEANS_TRUE + BOOLEANS_FALSE
# ansible modules can be written in any language. To simplify
# development of Python modules, the functions available here can
# be used to do many common tasks
import locale
import os
import re
import pipes
import shlex
import subprocess
import sys
import types
import time
import select
import shutil
import stat
import tempfile
import traceback
import grp
import pwd
import platform
import errno
import datetime
from itertools import repeat, chain
try:
import syslog
HAS_SYSLOG=True
except ImportError:
HAS_SYSLOG=False
try:
# Python 2
from itertools import imap
except ImportError:
# Python 3
imap = map
try:
# Python 2
basestring
except NameError:
# Python 3
basestring = str
try:
# Python 2
unicode
except NameError:
# Python 3
unicode = str
try:
# Python 2.6+
bytes
except NameError:
# Python 2.4
bytes = str
try:
dict.iteritems
except AttributeError:
# Python 3
def iteritems(d):
return d.items()
else:
# Python 2
def iteritems(d):
return d.iteritems()
try:
reduce
except NameError:
# Python 3
from functools import reduce
try:
NUMBERTYPES = (int, long, float)
except NameError:
# Python 3
NUMBERTYPES = (int, float)
# Python2 & 3 way to get NoneType
NoneType = type(None)
try:
from collections import Sequence, Mapping
except ImportError:
# python2.5
Sequence = (list, tuple)
Mapping = (dict,)
try:
from collections.abc import KeysView
SEQUENCETYPE = (Sequence, KeysView)
except:
SEQUENCETYPE = Sequence
try:
import json
# Detect the python-json library which is incompatible
# Look for simplejson if that's the case
try:
if not isinstance(json.loads, types.FunctionType) or not isinstance(json.dumps, types.FunctionType):
raise ImportError
except AttributeError:
raise ImportError
except ImportError:
try:
import simplejson as json
except ImportError:
print('\n{"msg": "Error: ansible requires the stdlib json or simplejson module, neither was found!", "failed": true}')
sys.exit(1)
except SyntaxError:
print('\n{"msg": "SyntaxError: probably due to installed simplejson being for a different python version", "failed": true}')
sys.exit(1)
from ansible.module_utils.six import PY2, PY3, b, binary_type, text_type, string_types
HAVE_SELINUX=False
try:
import selinux
HAVE_SELINUX=True
except ImportError:
pass
try:
from systemd import journal
has_journal = True
except ImportError:
has_journal = False
AVAILABLE_HASH_ALGORITHMS = dict()
try:
import hashlib
# python 2.7.9+ and 2.7.0+
for attribute in ('available_algorithms', 'algorithms'):
algorithms = getattr(hashlib, attribute, None)
if algorithms:
break
if algorithms is None:
# python 2.5+
algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
for algorithm in algorithms:
AVAILABLE_HASH_ALGORITHMS[algorithm] = getattr(hashlib, algorithm)
except ImportError:
import sha
AVAILABLE_HASH_ALGORITHMS = {'sha1': sha.sha}
try:
import md5
AVAILABLE_HASH_ALGORITHMS['md5'] = md5.md5
except ImportError:
pass
try:
from ast import literal_eval
except ImportError:
# a replacement for literal_eval that works with python 2.4. from:
# https://mail.python.org/pipermail/python-list/2009-September/551880.html
# which is essentially a cut/paste from an earlier (2.6) version of python's
# ast.py
from compiler import ast, parse
def literal_eval(node_or_string):
"""
Safely evaluate an expression node or a string containing a Python
expression. The string or node provided may only consist of the following
Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
and None.
"""
_safe_names = {'None': None, 'True': True, 'False': False}
if isinstance(node_or_string, basestring):
node_or_string = parse(node_or_string, mode='eval')
if isinstance(node_or_string, ast.Expression):
node_or_string = node_or_string.node
def _convert(node):
if isinstance(node, ast.Const) and isinstance(node.value, (basestring, int, float, long, complex)):
return node.value
elif isinstance(node, ast.Tuple):
return tuple(map(_convert, node.nodes))
elif isinstance(node, ast.List):
return list(map(_convert, node.nodes))
elif isinstance(node, ast.Dict):
return dict((_convert(k), _convert(v)) for k, v in node.items())
elif isinstance(node, ast.Name):
if node.name in _safe_names:
return _safe_names[node.name]
elif isinstance(node, ast.UnarySub):
return -_convert(node.expr)
raise ValueError('malformed string')
return _convert(node_or_string)
_literal_eval = literal_eval
# Backwards compat. These were present in basic.py before
from ansible.module_utils.pycompat24 import get_exception
# Internal global holding passed in params. This is consulted in case
# multiple AnsibleModules are created. Otherwise each AnsibleModule would
# attempt to read from stdin. Other code should not use this directly as it
# is an internal implementation detail
_ANSIBLE_ARGS = None
FILE_COMMON_ARGUMENTS=dict(
src = dict(),
mode = dict(type='raw'),
owner = dict(),
group = dict(),
seuser = dict(),
serole = dict(),
selevel = dict(),
setype = dict(),
follow = dict(type='bool', default=False),
# not taken by the file module, but other modules call file so it must ignore them.
content = dict(no_log=True),
backup = dict(),
force = dict(),
remote_src = dict(), # used by assemble
regexp = dict(), # used by assemble
delimiter = dict(), # used by assemble
directory_mode = dict(), # used by copy
)
PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')
# Can't use 07777 on Python 3, can't use 0o7777 on Python 2.4
PERM_BITS = int('07777', 8) # file mode permission bits
EXEC_PERM_BITS = int('00111', 8) # execute permission bits
DEFAULT_PERM = int('0666', 8) # default file permission bits
def get_platform():
''' what's the platform? example: Linux is a platform. '''
return platform.system()
def get_distribution():
''' return the distribution name '''
if platform.system() == 'Linux':
try:
supported_dists = platform._supported_dists + ('arch',)
distribution = platform.linux_distribution(supported_dists=supported_dists)[0].capitalize()
if not distribution and os.path.isfile('/etc/system-release'):
distribution = platform.linux_distribution(supported_dists=['system'])[0].capitalize()
if 'Amazon' in distribution:
distribution = 'Amazon'
else:
distribution = 'OtherLinux'
except:
# FIXME: MethodMissing, I assume?
distribution = platform.dist()[0].capitalize()
else:
distribution = None
return distribution
def get_distribution_version():
''' return the distribution version '''
if platform.system() == 'Linux':
try:
distribution_version = platform.linux_distribution()[1]
if not distribution_version and os.path.isfile('/etc/system-release'):
distribution_version = platform.linux_distribution(supported_dists=['system'])[1]
except:
# FIXME: MethodMissing, I assume?
distribution_version = platform.dist()[1]
else:
distribution_version = None
return distribution_version
def get_all_subclasses(cls):
'''
    used by modules like Hardware or Network fact classes to retrieve all subclasses of a given class.
    __subclasses__ returns only direct subclasses; this helper walks the whole class tree.
'''
# Retrieve direct subclasses
subclasses = cls.__subclasses__()
to_visit = list(subclasses)
# Then visit all subclasses
while to_visit:
for sc in to_visit:
# The current class is now visited, so remove it from list
to_visit.remove(sc)
# Appending all subclasses to visit and keep a reference of available class
for ssc in sc.__subclasses__():
subclasses.append(ssc)
to_visit.append(ssc)
return subclasses
def load_platform_subclass(cls, *args, **kwargs):
'''
used by modules like User to have different implementations based on detected platform. See User
module for an example.
'''
this_platform = get_platform()
distribution = get_distribution()
subclass = None
# get the most specific superclass for this platform
if distribution is not None:
for sc in get_all_subclasses(cls):
if sc.distribution is not None and sc.distribution == distribution and sc.platform == this_platform:
subclass = sc
if subclass is None:
for sc in get_all_subclasses(cls):
if sc.platform == this_platform and sc.distribution is None:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
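# Illustrative sketch of the intended usage, with hypothetical class names:
# a module defines a generic class plus platform-specific subclasses, e.g.
#
#   class User(object):
#       platform = 'Generic'
#       distribution = None
#       def __new__(cls, *args, **kwargs):
#           return load_platform_subclass(User, *args, **kwargs)
#
#   class FreeBsdUser(User):
#       platform = 'FreeBSD'
#       distribution = None
#
# so instantiating User() on FreeBSD transparently returns a FreeBsdUser.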
def json_dict_unicode_to_bytes(d, encoding='utf-8'):
''' Recursively convert dict keys and values to byte str
    Specialized for json return because this only handles lists, tuples,
and dict container types (the containers that the json module returns)
'''
if isinstance(d, unicode):
return d.encode(encoding)
elif isinstance(d, dict):
return dict(imap(json_dict_unicode_to_bytes, iteritems(d), repeat(encoding)))
elif isinstance(d, list):
return list(imap(json_dict_unicode_to_bytes, d, repeat(encoding)))
elif isinstance(d, tuple):
return tuple(imap(json_dict_unicode_to_bytes, d, repeat(encoding)))
else:
return d
def json_dict_bytes_to_unicode(d, encoding='utf-8'):
    ''' Recursively convert dict keys and values from byte str to unicode str
    Specialized for json return because this only handles lists, tuples,
    and dict container types (the containers that the json module returns)
'''
if isinstance(d, bytes):
return unicode(d, encoding)
elif isinstance(d, dict):
return dict(imap(json_dict_bytes_to_unicode, iteritems(d), repeat(encoding)))
elif isinstance(d, list):
return list(imap(json_dict_bytes_to_unicode, d, repeat(encoding)))
elif isinstance(d, tuple):
return tuple(imap(json_dict_bytes_to_unicode, d, repeat(encoding)))
else:
return d
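# Minimal sketch of the two converters above (Python 2 semantics assumed):
#   json_dict_unicode_to_bytes({u'name': u'caf\xe9'})   -> {'name': 'caf\xc3\xa9'}
#   json_dict_bytes_to_unicode({'name': 'caf\xc3\xa9'}) -> {u'name': u'caf\xe9'}
# Keys and values are converted recursively; leaves that are neither strings
# nor containers are returned unchanged.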
def return_values(obj):
""" Return stringified values from datastructures. For use with removing
sensitive values pre-jsonification."""
if isinstance(obj, basestring):
if obj:
if isinstance(obj, bytes):
yield obj
else:
# Unicode objects should all convert to utf-8
# (still must deal with surrogateescape on python3)
yield obj.encode('utf-8')
return
elif isinstance(obj, SEQUENCETYPE):
for element in obj:
for subelement in return_values(element):
yield subelement
elif isinstance(obj, Mapping):
for element in obj.items():
for subelement in return_values(element[1]):
yield subelement
elif isinstance(obj, (bool, NoneType)):
# This must come before int because bools are also ints
return
elif isinstance(obj, NUMBERTYPES):
yield str(obj)
else:
raise TypeError('Unknown parameter type: %s, %s' % (type(obj), obj))
def remove_values(value, no_log_strings):
""" Remove strings in no_log_strings from value. If value is a container
type, then remove a lot more"""
if isinstance(value, basestring):
if isinstance(value, unicode):
# This should work everywhere on python2. Need to check
# surrogateescape on python3
bytes_value = value.encode('utf-8')
value_is_unicode = True
else:
bytes_value = value
value_is_unicode = False
if bytes_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
bytes_value = bytes_value.replace(omit_me, '*' * 8)
if value_is_unicode:
value = unicode(bytes_value, 'utf-8', errors='replace')
else:
value = bytes_value
elif isinstance(value, SEQUENCETYPE):
return [remove_values(elem, no_log_strings) for elem in value]
elif isinstance(value, Mapping):
return dict((k, remove_values(v, no_log_strings)) for k, v in value.items())
elif isinstance(value, tuple(chain(NUMBERTYPES, (bool, NoneType)))):
stringy_value = str(value)
if stringy_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
if omit_me in stringy_value:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
elif isinstance(value, datetime.datetime):
value = value.isoformat()
else:
raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
return value
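# Minimal sketch of remove_values(), assuming no_log_strings = set(['s3cr3t']):
#   remove_values({'token': 's3cr3t', 'url': 'https://bob:s3cr3t@example.com'},
#                 no_log_strings)
# returns {'token': 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER',
#          'url': 'https://bob:********@example.com'}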
def heuristic_log_sanitize(data, no_log_values=None):
''' Remove strings that look like passwords from log messages '''
# Currently filters:
# user:pass@foo/whatever and http://username:pass@wherever/foo
# This code has false positives and consumes parts of logs that are
# not passwds
# begin: start of a passwd containing string
# end: end of a passwd containing string
# sep: char between user and passwd
# prev_begin: where in the overall string to start a search for
# a passwd
# sep_search_end: where in the string to end a search for the sep
output = []
begin = len(data)
prev_begin = begin
sep = 1
while sep:
# Find the potential end of a passwd
try:
end = data.rindex('@', 0, begin)
except ValueError:
# No passwd in the rest of the data
output.insert(0, data[0:begin])
break
# Search for the beginning of a passwd
sep = None
sep_search_end = end
while not sep:
# URL-style username+password
try:
begin = data.rindex('://', 0, sep_search_end)
except ValueError:
# No url style in the data, check for ssh style in the
# rest of the string
begin = 0
# Search for separator
try:
sep = data.index(':', begin + 3, end)
except ValueError:
# No separator; choices:
if begin == 0:
# Searched the whole string so there's no password
# here. Return the remaining data
output.insert(0, data[0:begin])
break
# Search for a different beginning of the password field.
sep_search_end = begin
continue
if sep:
# Password was found; remove it.
output.insert(0, data[end:prev_begin])
output.insert(0, '********')
output.insert(0, data[begin:sep + 1])
prev_begin = begin
output = ''.join(output)
if no_log_values:
output = remove_values(output, no_log_values)
return output
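# Minimal sketch of the heuristic above, with a hypothetical URL:
#   heuristic_log_sanitize('ssh://user:p4ss@host/path')
# returns 'ssh://user:********@host/path' -- everything between the user:passwd
# separator and the trailing '@' is masked.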
def is_executable(path):
'''is the given path executable?
Limitations:
* Does not account for FSACLs.
* Most times we really want to know "Can the current user execute this
file" This function does not tell us that, only if an execute bit is set.
'''
# These are all bitfields so first bitwise-or all the permissions we're
# looking for, then bitwise-and with the file's mode to determine if any
# execute bits are set.
return ((stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) & os.stat(path)[stat.ST_MODE])
def _load_params():
''' read the modules parameters and store them globally.
This function may be needed for certain very dynamic custom modules which
want to process the parameters that are being handed the module. Since
this is so closely tied to the implementation of modules we cannot
guarantee API stability for it (it may change between versions) however we
will try not to break it gratuitously. It is certainly more future-proof
to call this function and consume its outputs than to implement the logic
inside it as a copy in your own code.
'''
global _ANSIBLE_ARGS
if _ANSIBLE_ARGS is not None:
buffer = _ANSIBLE_ARGS
else:
# debug overrides to read args from file or cmdline
# Avoid tracebacks when locale is non-utf8
# We control the args and we pass them as utf8
if len(sys.argv) > 1:
if os.path.isfile(sys.argv[1]):
fd = open(sys.argv[1], 'rb')
buffer = fd.read()
fd.close()
else:
buffer = sys.argv[1]
if PY3:
buffer = buffer.encode('utf-8', errors='surrogateescape')
# default case, read from stdin
else:
if PY2:
buffer = sys.stdin.read()
else:
buffer = sys.stdin.buffer.read()
_ANSIBLE_ARGS = buffer
try:
params = json.loads(buffer.decode('utf-8'))
except ValueError:
        # This helper is used too early for fail_json to work.
print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}')
sys.exit(1)
if PY2:
params = json_dict_unicode_to_bytes(params)
try:
return params['ANSIBLE_MODULE_ARGS']
except KeyError:
# This helper does not have access to fail_json so we have to print
# json output on our own.
print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", "failed": true}')
sys.exit(1)
def env_fallback(*args, **kwargs):
''' Load value from environment '''
for arg in args:
if arg in os.environ:
return os.environ[arg]
else:
raise AnsibleFallbackNotFound
class AnsibleFallbackNotFound(Exception):
pass
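# Minimal sketch of how env_fallback is wired into an argument_spec
# (hypothetical parameter and variable names):
#   argument_spec = dict(
#       api_key=dict(no_log=True, fallback=(env_fallback, ['MYSERVICE_API_KEY'])),
#   )
# _set_fallbacks() below calls env_fallback('MYSERVICE_API_KEY') when the
# parameter was not supplied; AnsibleFallbackNotFound simply leaves it unset.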
class AnsibleModule(object):
def __init__(self, argument_spec, bypass_checks=False, no_log=False,
check_invalid_arguments=True, mutually_exclusive=None, required_together=None,
required_one_of=None, add_file_common_args=False, supports_check_mode=False,
required_if=None):
'''
common code for quickly building an ansible module in Python
(although you can write modules in anything that can return JSON)
see library/* for examples
'''
self.argument_spec = argument_spec
self.supports_check_mode = supports_check_mode
self.check_mode = False
self.no_log = no_log
self.cleanup_files = []
self._debug = False
self._diff = False
self._verbosity = 0
# May be used to set modifications to the environment for any
# run_command invocation
self.run_command_environ_update = {}
self.aliases = {}
self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug', '_ansible_diff', '_ansible_verbosity', '_ansible_selinux_special_fs', '_ansible_module_name', '_ansible_version', '_ansible_syslog_facility']
if add_file_common_args:
for k, v in FILE_COMMON_ARGUMENTS.items():
if k not in self.argument_spec:
self.argument_spec[k] = v
self._load_params()
self._set_fallbacks()
# append to legal_inputs and then possibly check against them
try:
self.aliases = self._handle_aliases()
except Exception:
e = get_exception()
# Use exceptions here because it isn't safe to call fail_json until no_log is processed
print('\n{"failed": true, "msg": "Module alias error: %s"}' % str(e))
sys.exit(1)
# Save parameter values that should never be logged
self.no_log_values = set()
# Use the argspec to determine which args are no_log
for arg_name, arg_opts in self.argument_spec.items():
if arg_opts.get('no_log', False):
# Find the value for the no_log'd param
no_log_object = self.params.get(arg_name, None)
if no_log_object:
self.no_log_values.update(return_values(no_log_object))
# check the locale as set by the current environment, and reset to
# a known valid (LANG=C) if it's an invalid/unavailable locale
self._check_locale()
self._check_arguments(check_invalid_arguments)
# check exclusive early
if not bypass_checks:
self._check_mutually_exclusive(mutually_exclusive)
self._set_defaults(pre=True)
self._CHECK_ARGUMENT_TYPES_DISPATCHER = {
'str': self._check_type_str,
'list': self._check_type_list,
'dict': self._check_type_dict,
'bool': self._check_type_bool,
'int': self._check_type_int,
'float': self._check_type_float,
'path': self._check_type_path,
'raw': self._check_type_raw,
'jsonarg': self._check_type_jsonarg,
}
if not bypass_checks:
self._check_required_arguments()
self._check_argument_types()
self._check_argument_values()
self._check_required_together(required_together)
self._check_required_one_of(required_one_of)
self._check_required_if(required_if)
self._set_defaults(pre=False)
if not self.no_log and self._verbosity >= 3:
self._log_invocation()
# finally, make sure we're in a sane working dir
self._set_cwd()
def load_file_common_arguments(self, params):
'''
many modules deal with files, this encapsulates common
options that the file module accepts such that it is directly
available to all modules and they can share code.
'''
path = params.get('path', params.get('dest', None))
if path is None:
return {}
else:
path = os.path.expanduser(path)
# if the path is a symlink, and we're following links, get
# the target of the link instead for testing
if params.get('follow', False) and os.path.islink(path):
path = os.path.realpath(path)
mode = params.get('mode', None)
owner = params.get('owner', None)
group = params.get('group', None)
# selinux related options
seuser = params.get('seuser', None)
serole = params.get('serole', None)
setype = params.get('setype', None)
selevel = params.get('selevel', None)
secontext = [seuser, serole, setype]
if self.selinux_mls_enabled():
secontext.append(selevel)
default_secontext = self.selinux_default_context(path)
for i in range(len(default_secontext)):
if i is not None and secontext[i] == '_default':
secontext[i] = default_secontext[i]
return dict(
path=path, mode=mode, owner=owner, group=group,
seuser=seuser, serole=serole, setype=setype,
selevel=selevel, secontext=secontext,
)
# Detect whether using selinux that is MLS-aware.
# While this means you can set the level/range with
# selinux.lsetfilecon(), it may or may not mean that you
# will get the selevel as part of the context returned
# by selinux.lgetfilecon().
def selinux_mls_enabled(self):
if not HAVE_SELINUX:
return False
if selinux.is_selinux_mls_enabled() == 1:
return True
else:
return False
def selinux_enabled(self):
if not HAVE_SELINUX:
seenabled = self.get_bin_path('selinuxenabled')
if seenabled is not None:
(rc,out,err) = self.run_command(seenabled)
if rc == 0:
self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
return False
if selinux.is_selinux_enabled() == 1:
return True
else:
return False
# Determine whether we need a placeholder for selevel/mls
def selinux_initial_context(self):
context = [None, None, None]
if self.selinux_mls_enabled():
context.append(None)
return context
def _to_filesystem_str(self, path):
'''Returns filesystem path as a str, if it wasn't already.
Used in selinux interactions because it cannot accept unicode
instances, and specifying complex args in a playbook leaves
you with unicode instances. This method currently assumes
that your filesystem encoding is UTF-8.
'''
if isinstance(path, unicode):
path = path.encode("utf-8")
return path
# If selinux fails to find a default, return an array of None
def selinux_default_context(self, path, mode=0):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.matchpathcon(self._to_filesystem_str(path), mode)
except OSError:
return context
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def selinux_context(self, path):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.lgetfilecon_raw(self._to_filesystem_str(path))
except OSError:
e = get_exception()
if e.errno == errno.ENOENT:
self.fail_json(path=path, msg='path %s does not exist' % path)
else:
self.fail_json(path=path, msg='failed to retrieve selinux context')
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def user_and_group(self, filename):
filename = os.path.expanduser(filename)
st = os.lstat(filename)
uid = st.st_uid
gid = st.st_gid
return (uid, gid)
def find_mount_point(self, path):
path = os.path.realpath(os.path.expanduser(os.path.expandvars(path)))
while not os.path.ismount(path):
path = os.path.dirname(path)
return path
def is_special_selinux_path(self, path):
"""
Returns a tuple containing (True, selinux_context) if the given path is on a
NFS or other 'special' fs mount point, otherwise the return will be (False, None).
"""
try:
f = open('/proc/mounts', 'r')
mount_data = f.readlines()
f.close()
except:
return (False, None)
path_mount_point = self.find_mount_point(path)
for line in mount_data:
(device, mount_point, fstype, options, rest) = line.split(' ', 4)
if path_mount_point == mount_point:
for fs in self._selinux_special_fs:
if fs in fstype:
special_context = self.selinux_context(path_mount_point)
return (True, special_context)
return (False, None)
def set_default_selinux_context(self, path, changed):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
context = self.selinux_default_context(path)
return self.set_context_if_different(path, context, False)
def set_context_if_different(self, path, context, changed, diff=None):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
cur_context = self.selinux_context(path)
new_context = list(cur_context)
# Iterate over the current context instead of the
# argument context, which may have selevel.
(is_special_se, sp_context) = self.is_special_selinux_path(path)
if is_special_se:
new_context = sp_context
else:
for i in range(len(cur_context)):
if len(context) > i:
if context[i] is not None and context[i] != cur_context[i]:
new_context[i] = context[i]
elif context[i] is None:
new_context[i] = cur_context[i]
if cur_context != new_context:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['secontext'] = cur_context
if 'after' not in diff:
diff['after'] = {}
diff['after']['secontext'] = new_context
try:
if self.check_mode:
return True
rc = selinux.lsetfilecon(self._to_filesystem_str(path),
str(':'.join(new_context)))
except OSError:
e = get_exception()
self.fail_json(path=path, msg='invalid selinux context: %s' % str(e), new_context=new_context, cur_context=cur_context, input_was=context)
if rc != 0:
self.fail_json(path=path, msg='set selinux context failed')
changed = True
return changed
def set_owner_if_different(self, path, owner, changed, diff=None):
path = os.path.expanduser(path)
if owner is None:
return changed
orig_uid, orig_gid = self.user_and_group(path)
try:
uid = int(owner)
except ValueError:
try:
uid = pwd.getpwnam(owner).pw_uid
except KeyError:
self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
if orig_uid != uid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['owner'] = orig_uid
if 'after' not in diff:
diff['after'] = {}
diff['after']['owner'] = uid
if self.check_mode:
return True
try:
os.lchown(path, uid, -1)
except OSError:
self.fail_json(path=path, msg='chown failed')
changed = True
return changed
def set_group_if_different(self, path, group, changed, diff=None):
path = os.path.expanduser(path)
if group is None:
return changed
orig_uid, orig_gid = self.user_and_group(path)
try:
gid = int(group)
except ValueError:
try:
gid = grp.getgrnam(group).gr_gid
except KeyError:
self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
if orig_gid != gid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['group'] = orig_gid
if 'after' not in diff:
diff['after'] = {}
diff['after']['group'] = gid
if self.check_mode:
return True
try:
os.lchown(path, -1, gid)
except OSError:
self.fail_json(path=path, msg='chgrp failed')
changed = True
return changed
def set_mode_if_different(self, path, mode, changed, diff=None):
path = os.path.expanduser(path)
path_stat = os.lstat(path)
if mode is None:
return changed
if not isinstance(mode, int):
try:
mode = int(mode, 8)
except Exception:
try:
mode = self._symbolic_mode_to_octal(path_stat, mode)
except Exception:
e = get_exception()
self.fail_json(path=path,
msg="mode must be in octal or symbolic form",
details=str(e))
if mode != stat.S_IMODE(mode):
            # prevent mode from having extra info or being an invalid long number
self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode)
prev_mode = stat.S_IMODE(path_stat.st_mode)
if prev_mode != mode:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['mode'] = oct(prev_mode)
if 'after' not in diff:
diff['after'] = {}
diff['after']['mode'] = oct(mode)
if self.check_mode:
return True
# FIXME: comparison against string above will cause this to be executed
# every time
try:
if hasattr(os, 'lchmod'):
os.lchmod(path, mode)
else:
if not os.path.islink(path):
os.chmod(path, mode)
else:
# Attempt to set the perms of the symlink but be
# careful not to change the perms of the underlying
# file while trying
underlying_stat = os.stat(path)
os.chmod(path, mode)
new_underlying_stat = os.stat(path)
if underlying_stat.st_mode != new_underlying_stat.st_mode:
os.chmod(path, stat.S_IMODE(underlying_stat.st_mode))
except OSError:
e = get_exception()
if os.path.islink(path) and e.errno == errno.EPERM: # Can't set mode on symbolic links
pass
elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links
pass
else:
raise e
except Exception:
e = get_exception()
self.fail_json(path=path, msg='chmod failed', details=str(e))
path_stat = os.lstat(path)
new_mode = stat.S_IMODE(path_stat.st_mode)
if new_mode != prev_mode:
changed = True
return changed
def _symbolic_mode_to_octal(self, path_stat, symbolic_mode):
new_mode = stat.S_IMODE(path_stat.st_mode)
mode_re = re.compile(r'^(?P<users>[ugoa]+)(?P<operator>[-+=])(?P<perms>[rwxXst-]*|[ugo])$')
for mode in symbolic_mode.split(','):
match = mode_re.match(mode)
if match:
users = match.group('users')
operator = match.group('operator')
perms = match.group('perms')
if users == 'a':
users = 'ugo'
for user in users:
mode_to_apply = self._get_octal_mode_from_symbolic_perms(path_stat, user, perms)
new_mode = self._apply_operation_to_mode(user, operator, mode_to_apply, new_mode)
else:
raise ValueError("bad symbolic permission for mode: %s" % mode)
return new_mode
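    # Worked example for _symbolic_mode_to_octal(), assuming the file currently
    # has mode 0644: the symbolic mode 'u+x,g-r' yields
    #   0644 | 0100         = 0744   (u+x)
    #   0744 - (0744 & 040) = 0704   (g-r)
    # i.e. an octal mode of 0704.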
def _apply_operation_to_mode(self, user, operator, mode_to_apply, current_mode):
if operator == '=':
if user == 'u': mask = stat.S_IRWXU | stat.S_ISUID
elif user == 'g': mask = stat.S_IRWXG | stat.S_ISGID
elif user == 'o': mask = stat.S_IRWXO | stat.S_ISVTX
# mask out u, g, or o permissions from current_mode and apply new permissions
inverse_mask = mask ^ PERM_BITS
new_mode = (current_mode & inverse_mask) | mode_to_apply
elif operator == '+':
new_mode = current_mode | mode_to_apply
elif operator == '-':
new_mode = current_mode - (current_mode & mode_to_apply)
return new_mode
def _get_octal_mode_from_symbolic_perms(self, path_stat, user, perms):
prev_mode = stat.S_IMODE(path_stat.st_mode)
is_directory = stat.S_ISDIR(path_stat.st_mode)
has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
apply_X_permission = is_directory or has_x_permissions
# Permission bits constants documented at:
# http://docs.python.org/2/library/stat.html#stat.S_ISUID
if apply_X_permission:
X_perms = {
'u': {'X': stat.S_IXUSR},
'g': {'X': stat.S_IXGRP},
'o': {'X': stat.S_IXOTH}
}
else:
X_perms = {
'u': {'X': 0},
'g': {'X': 0},
'o': {'X': 0}
}
user_perms_to_modes = {
'u': {
'r': stat.S_IRUSR,
'w': stat.S_IWUSR,
'x': stat.S_IXUSR,
's': stat.S_ISUID,
't': 0,
'u': prev_mode & stat.S_IRWXU,
'g': (prev_mode & stat.S_IRWXG) << 3,
'o': (prev_mode & stat.S_IRWXO) << 6 },
'g': {
'r': stat.S_IRGRP,
'w': stat.S_IWGRP,
'x': stat.S_IXGRP,
's': stat.S_ISGID,
't': 0,
'u': (prev_mode & stat.S_IRWXU) >> 3,
'g': prev_mode & stat.S_IRWXG,
'o': (prev_mode & stat.S_IRWXO) << 3 },
'o': {
'r': stat.S_IROTH,
'w': stat.S_IWOTH,
'x': stat.S_IXOTH,
's': 0,
't': stat.S_ISVTX,
'u': (prev_mode & stat.S_IRWXU) >> 6,
'g': (prev_mode & stat.S_IRWXG) >> 3,
'o': prev_mode & stat.S_IRWXO }
}
# Insert X_perms into user_perms_to_modes
for key, value in X_perms.items():
user_perms_to_modes[key].update(value)
or_reduce = lambda mode, perm: mode | user_perms_to_modes[user][perm]
return reduce(or_reduce, perms, 0)
def set_fs_attributes_if_different(self, file_args, changed, diff=None):
# set modes owners and context as needed
changed = self.set_context_if_different(
file_args['path'], file_args['secontext'], changed, diff
)
changed = self.set_owner_if_different(
file_args['path'], file_args['owner'], changed, diff
)
changed = self.set_group_if_different(
file_args['path'], file_args['group'], changed, diff
)
changed = self.set_mode_if_different(
file_args['path'], file_args['mode'], changed, diff
)
return changed
def set_directory_attributes_if_different(self, file_args, changed, diff=None):
return self.set_fs_attributes_if_different(file_args, changed, diff)
def set_file_attributes_if_different(self, file_args, changed, diff=None):
return self.set_fs_attributes_if_different(file_args, changed, diff)
def add_path_info(self, kwargs):
'''
for results that are files, supplement the info about the file
in the return path with stats about the file path.
'''
path = kwargs.get('path', kwargs.get('dest', None))
if path is None:
return kwargs
if os.path.exists(path):
(uid, gid) = self.user_and_group(path)
kwargs['uid'] = uid
kwargs['gid'] = gid
try:
user = pwd.getpwuid(uid)[0]
except KeyError:
user = str(uid)
try:
group = grp.getgrgid(gid)[0]
except KeyError:
group = str(gid)
kwargs['owner'] = user
kwargs['group'] = group
st = os.lstat(path)
kwargs['mode'] = oct(stat.S_IMODE(st[stat.ST_MODE]))
# secontext not yet supported
if os.path.islink(path):
kwargs['state'] = 'link'
elif os.path.isdir(path):
kwargs['state'] = 'directory'
elif os.stat(path).st_nlink > 1:
kwargs['state'] = 'hard'
else:
kwargs['state'] = 'file'
if HAVE_SELINUX and self.selinux_enabled():
kwargs['secontext'] = ':'.join(self.selinux_context(path))
kwargs['size'] = st[stat.ST_SIZE]
else:
kwargs['state'] = 'absent'
return kwargs
def _check_locale(self):
'''
Uses the locale module to test the currently set locale
(per the LANG and LC_CTYPE environment settings)
'''
try:
# setting the locale to '' uses the default locale
# as it would be returned by locale.getdefaultlocale()
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
# fallback to the 'C' locale, which may cause unicode
# issues but is preferable to simply failing because
# of an unknown locale
locale.setlocale(locale.LC_ALL, 'C')
os.environ['LANG'] = 'C'
os.environ['LC_ALL'] = 'C'
os.environ['LC_MESSAGES'] = 'C'
except Exception:
e = get_exception()
self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" % e)
def _handle_aliases(self):
# this uses exceptions as it happens before we can safely call fail_json
aliases_results = {} #alias:canon
for (k,v) in self.argument_spec.items():
self._legal_inputs.append(k)
aliases = v.get('aliases', None)
default = v.get('default', None)
required = v.get('required', False)
if default is not None and required:
# not alias specific but this is a good place to check this
raise Exception("internal error: required and default are mutually exclusive for %s" % k)
if aliases is None:
continue
if type(aliases) != list:
raise Exception('internal error: aliases must be a list')
for alias in aliases:
self._legal_inputs.append(alias)
aliases_results[alias] = k
if alias in self.params:
self.params[k] = self.params[alias]
return aliases_results
def _check_arguments(self, check_invalid_arguments):
self._syslog_facility = 'LOG_USER'
for (k,v) in list(self.params.items()):
if k == '_ansible_check_mode' and v:
self.check_mode = True
elif k == '_ansible_no_log':
self.no_log = self.boolean(v)
elif k == '_ansible_debug':
self._debug = self.boolean(v)
elif k == '_ansible_diff':
self._diff = self.boolean(v)
elif k == '_ansible_verbosity':
self._verbosity = v
elif k == '_ansible_selinux_special_fs':
self._selinux_special_fs = v
elif k == '_ansible_syslog_facility':
self._syslog_facility = v
elif k == '_ansible_version':
self.ansible_version = v
elif k == '_ansible_module_name':
self._name = v
elif check_invalid_arguments and k not in self._legal_inputs:
self.fail_json(msg="unsupported parameter for module: %s" % k)
#clean up internal params:
if k.startswith('_ansible_'):
del self.params[k]
if self.check_mode and not self.supports_check_mode:
self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
def _count_terms(self, check):
count = 0
for term in check:
if term in self.params:
count += 1
return count
def _check_mutually_exclusive(self, spec):
if spec is None:
return
for check in spec:
count = self._count_terms(check)
if count > 1:
self.fail_json(msg="parameters are mutually exclusive: %s" % (check,))
def _check_required_one_of(self, spec):
if spec is None:
return
for check in spec:
count = self._count_terms(check)
if count == 0:
self.fail_json(msg="one of the following is required: %s" % ','.join(check))
def _check_required_together(self, spec):
if spec is None:
return
for check in spec:
counts = [ self._count_terms([field]) for field in check ]
non_zero = [ c for c in counts if c > 0 ]
if len(non_zero) > 0:
if 0 in counts:
self.fail_json(msg="parameters are required together: %s" % (check,))
def _check_required_arguments(self):
''' ensure all required arguments are present '''
missing = []
for (k,v) in self.argument_spec.items():
required = v.get('required', False)
if required and k not in self.params:
missing.append(k)
if len(missing) > 0:
self.fail_json(msg="missing required arguments: %s" % ",".join(missing))
def _check_required_if(self, spec):
        ''' ensure that parameters which are conditionally required are present '''
if spec is None:
return
for (key, val, requirements) in spec:
missing = []
if key in self.params and self.params[key] == val:
for check in requirements:
count = self._count_terms((check,))
if count == 0:
missing.append(check)
if len(missing) > 0:
self.fail_json(msg="%s is %s but the following are missing: %s" % (key, val, ','.join(missing)))
def _check_argument_values(self):
''' ensure all arguments have the requested values, and there are no stray arguments '''
for (k,v) in self.argument_spec.items():
choices = v.get('choices',None)
if choices is None:
continue
if isinstance(choices, SEQUENCETYPE):
if k in self.params:
if self.params[k] not in choices:
choices_str=",".join([str(c) for c in choices])
msg="value of %s must be one of: %s, got: %s" % (k, choices_str, self.params[k])
self.fail_json(msg=msg)
else:
self.fail_json(msg="internal error: choices for argument %s are not iterable: %s" % (k, choices))
def safe_eval(self, str, locals=None, include_exceptions=False):
# do not allow method calls to modules
if not isinstance(str, basestring):
# already templated to a datastructure, perhaps?
if include_exceptions:
return (str, None)
return str
if re.search(r'\w\.\w+\(', str):
if include_exceptions:
return (str, None)
return str
# do not allow imports
if re.search(r'import \w+', str):
if include_exceptions:
return (str, None)
return str
try:
result = literal_eval(str)
if include_exceptions:
return (result, None)
else:
return result
except Exception:
e = get_exception()
if include_exceptions:
return (str, e)
return str
def _check_type_str(self, value):
if isinstance(value, basestring):
return value
# Note: This could throw a unicode error if value's __str__() method
# returns non-ascii. Have to port utils.to_bytes() if that happens
return str(value)
def _check_type_list(self, value):
if isinstance(value, list):
return value
if isinstance(value, basestring):
return value.split(",")
elif isinstance(value, int) or isinstance(value, float):
return [ str(value) ]
raise TypeError('%s cannot be converted to a list' % type(value))
def _check_type_dict(self, value):
if isinstance(value, dict):
return value
if isinstance(value, basestring):
if value.startswith("{"):
try:
return json.loads(value)
except:
(result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
if exc is not None:
raise TypeError('unable to evaluate string as dictionary')
return result
elif '=' in value:
fields = []
field_buffer = []
in_quote = False
in_escape = False
for c in value.strip():
if in_escape:
field_buffer.append(c)
in_escape = False
elif c == '\\':
in_escape = True
elif not in_quote and c in ('\'', '"'):
in_quote = c
elif in_quote and in_quote == c:
in_quote = False
elif not in_quote and c in (',', ' '):
field = ''.join(field_buffer)
if field:
fields.append(field)
field_buffer = []
else:
field_buffer.append(c)
field = ''.join(field_buffer)
if field:
fields.append(field)
return dict(x.split("=", 1) for x in fields)
else:
raise TypeError("dictionary requested, could not parse JSON or key=value")
raise TypeError('%s cannot be converted to a dict' % type(value))
def _check_type_bool(self, value):
if isinstance(value, bool):
return value
if isinstance(value, basestring) or isinstance(value, int):
return self.boolean(value)
raise TypeError('%s cannot be converted to a bool' % type(value))
def _check_type_int(self, value):
if isinstance(value, int):
return value
if isinstance(value, basestring):
return int(value)
raise TypeError('%s cannot be converted to an int' % type(value))
def _check_type_float(self, value):
if isinstance(value, float):
return value
if isinstance(value, basestring):
return float(value)
raise TypeError('%s cannot be converted to a float' % type(value))
def _check_type_path(self, value):
value = self._check_type_str(value)
return os.path.expanduser(os.path.expandvars(value))
def _check_type_jsonarg(self, value):
# Return a jsonified string. Sometimes the controller turns a json
# string into a dict/list so transform it back into json here
if isinstance(value, (unicode, bytes)):
return value.strip()
else:
            if isinstance(value, (list, tuple, dict)):
return json.dumps(value)
raise TypeError('%s cannot be converted to a json string' % type(value))
def _check_type_raw(self, value):
return value
def _check_argument_types(self):
''' ensure all arguments have the requested type '''
for (k, v) in self.argument_spec.items():
wanted = v.get('type', None)
if k not in self.params:
continue
if wanted is None:
# Mostly we want to default to str.
# For values set to None explicitly, return None instead as
# that allows a user to unset a parameter
if self.params[k] is None:
continue
wanted = 'str'
value = self.params[k]
if value is None:
continue
try:
type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted]
except KeyError:
self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
try:
self.params[k] = type_checker(value)
except (TypeError, ValueError):
self.fail_json(msg="argument %s is of type %s and we were unable to convert to %s" % (k, type(value), wanted))
def _set_defaults(self, pre=True):
for (k,v) in self.argument_spec.items():
default = v.get('default', None)
if pre == True:
# this prevents setting defaults on required items
if default is not None and k not in self.params:
self.params[k] = default
else:
# make sure things without a default still get set None
if k not in self.params:
self.params[k] = default
def _set_fallbacks(self):
for k,v in self.argument_spec.items():
fallback = v.get('fallback', (None,))
fallback_strategy = fallback[0]
fallback_args = []
fallback_kwargs = {}
if k not in self.params and fallback_strategy is not None:
for item in fallback[1:]:
if isinstance(item, dict):
fallback_kwargs = item
else:
fallback_args = item
try:
self.params[k] = fallback_strategy(*fallback_args, **fallback_kwargs)
except AnsibleFallbackNotFound:
continue
def _load_params(self):
''' read the input and set the params attribute.
This method is for backwards compatibility. The guts of the function
were moved out in 2.1 so that custom modules could read the parameters.
'''
# debug overrides to read args from file or cmdline
self.params = _load_params()
def _log_to_syslog(self, msg):
if HAS_SYSLOG:
module = 'ansible-%s' % self._name
facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
syslog.openlog(str(module), 0, facility)
syslog.syslog(syslog.LOG_INFO, msg)
def debug(self, msg):
if self._debug:
self.log(msg)
def log(self, msg, log_args=None):
if not self.no_log:
if log_args is None:
log_args = dict()
module = 'ansible-%s' % self._name
if isinstance(module, bytes):
module = module.decode('utf-8', 'replace')
# 6655 - allow for accented characters
if not isinstance(msg, (bytes, unicode)):
raise TypeError("msg should be a string (got %s)" % type(msg))
# We want journal to always take text type
# syslog takes bytes on py2, text type on py3
if isinstance(msg, bytes):
journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values)
else:
# TODO: surrogateescape is a danger here on Py3
journal_msg = remove_values(msg, self.no_log_values)
if PY3:
syslog_msg = journal_msg
else:
syslog_msg = journal_msg.encode('utf-8', 'replace')
if has_journal:
journal_args = [("MODULE", os.path.basename(__file__))]
for arg in log_args:
journal_args.append((arg.upper(), str(log_args[arg])))
try:
journal.send(u"%s %s" % (module, journal_msg), **dict(journal_args))
except IOError:
# fall back to syslog since logging to journal failed
self._log_to_syslog(syslog_msg)
else:
self._log_to_syslog(syslog_msg)
def _log_invocation(self):
''' log that ansible ran the module '''
# TODO: generalize a separate log function and make log_invocation use it
# Sanitize possible password argument when logging.
log_args = dict()
passwd_keys = ['password', 'login_password']
for param in self.params:
canon = self.aliases.get(param, param)
arg_opts = self.argument_spec.get(canon, {})
no_log = arg_opts.get('no_log', False)
if self.boolean(no_log):
log_args[param] = 'NOT_LOGGING_PARAMETER'
elif param in passwd_keys:
log_args[param] = 'NOT_LOGGING_PASSWORD'
else:
param_val = self.params[param]
if not isinstance(param_val, basestring):
param_val = str(param_val)
elif isinstance(param_val, unicode):
param_val = param_val.encode('utf-8')
log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values)
msg = []
for arg in log_args:
arg_val = log_args[arg]
if not isinstance(arg_val, basestring):
arg_val = str(arg_val)
elif isinstance(arg_val, unicode):
arg_val = arg_val.encode('utf-8')
msg.append('%s=%s' % (arg, arg_val))
if msg:
msg = 'Invoked with %s' % ' '.join(msg)
else:
msg = 'Invoked'
self.log(msg, log_args=log_args)
def _set_cwd(self):
try:
cwd = os.getcwd()
if not os.access(cwd, os.F_OK|os.R_OK):
raise
return cwd
except:
# we don't have access to the cwd, probably because of sudo.
# Try and move to a neutral location to prevent errors
for cwd in [os.path.expandvars('$HOME'), tempfile.gettempdir()]:
try:
if os.access(cwd, os.F_OK|os.R_OK):
os.chdir(cwd)
return cwd
except:
pass
# we won't error here, as it may *not* be a problem,
# and we don't want to break modules unnecessarily
return None
def get_bin_path(self, arg, required=False, opt_dirs=[]):
'''
find system executable in PATH.
Optional arguments:
- required: if executable is not found and required is true, fail_json
- opt_dirs: optional list of directories to search in addition to PATH
if found return full path; otherwise return None
'''
sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
paths = []
for d in opt_dirs:
if d is not None and os.path.exists(d):
paths.append(d)
paths += os.environ.get('PATH', '').split(os.pathsep)
bin_path = None
# mangle PATH to include /sbin dirs
for p in sbin_paths:
if p not in paths and os.path.exists(p):
paths.append(p)
for d in paths:
if not d:
continue
path = os.path.join(d, arg)
if os.path.exists(path) and is_executable(path):
bin_path = path
break
if required and bin_path is None:
self.fail_json(msg='Failed to find required executable %s' % arg)
return bin_path
def boolean(self, arg):
''' return a bool for the arg '''
if arg is None or type(arg) == bool:
return arg
if isinstance(arg, basestring):
arg = arg.lower()
if arg in BOOLEANS_TRUE:
return True
elif arg in BOOLEANS_FALSE:
return False
else:
self.fail_json(msg='Boolean %s not in either boolean list' % arg)
def jsonify(self, data):
for encoding in ("utf-8", "latin-1"):
try:
return json.dumps(data, encoding=encoding)
            # Old systems using the old simplejson module do not support the encoding keyword.
except TypeError:
try:
new_data = json_dict_bytes_to_unicode(data, encoding=encoding)
except UnicodeDecodeError:
continue
return json.dumps(new_data)
except UnicodeDecodeError:
continue
self.fail_json(msg='Invalid unicode encoding encountered')
def from_json(self, data):
return json.loads(data)
def add_cleanup_file(self, path):
if path not in self.cleanup_files:
self.cleanup_files.append(path)
def do_cleanup_files(self):
for path in self.cleanup_files:
self.cleanup(path)
def exit_json(self, **kwargs):
''' return from the module, without error '''
self.add_path_info(kwargs)
if not 'changed' in kwargs:
kwargs['changed'] = False
if 'invocation' not in kwargs:
kwargs['invocation'] = {'module_args': self.params}
kwargs = remove_values(kwargs, self.no_log_values)
self.do_cleanup_files()
print('\n%s' % self.jsonify(kwargs))
sys.exit(0)
def fail_json(self, **kwargs):
''' return from the module, with an error message '''
self.add_path_info(kwargs)
assert 'msg' in kwargs, "implementation error -- msg to explain the error is required"
kwargs['failed'] = True
if 'invocation' not in kwargs:
kwargs['invocation'] = {'module_args': self.params}
kwargs = remove_values(kwargs, self.no_log_values)
self.do_cleanup_files()
print('\n%s' % self.jsonify(kwargs))
sys.exit(1)
def fail_on_missing_params(self, required_params=None):
''' This is for checking for required params when we can not check via argspec because we
need more information than is simply given in the argspec.
'''
if not required_params:
return
missing_params = []
for required_param in required_params:
if not self.params.get(required_param):
missing_params.append(required_param)
if missing_params:
self.fail_json(msg="missing required arguments: %s" % ','.join(missing_params))
def digest_from_file(self, filename, algorithm):
''' Return hex digest of local file for a digest_method specified by name, or None if file is not present. '''
if not os.path.exists(filename):
return None
if os.path.isdir(filename):
self.fail_json(msg="attempted to take checksum of directory: %s" % filename)
# preserve old behaviour where the third parameter was a hash algorithm object
if hasattr(algorithm, 'hexdigest'):
digest_method = algorithm
else:
try:
digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]()
except KeyError:
self.fail_json(msg="Could not hash file '%s' with algorithm '%s'. Available algorithms: %s" %
(filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))
blocksize = 64 * 1024
infile = open(filename, 'rb')
block = infile.read(blocksize)
while block:
digest_method.update(block)
block = infile.read(blocksize)
infile.close()
return digest_method.hexdigest()
def md5(self, filename):
''' Return MD5 hex digest of local file using digest_from_file().
Do not use this function unless you have no other choice for:
1) Optional backwards compatibility
2) Compatibility with a third party protocol
This function will not work on systems complying with FIPS-140-2.
Most uses of this function can use the module.sha1 function instead.
'''
if 'md5' not in AVAILABLE_HASH_ALGORITHMS:
raise ValueError('MD5 not available. Possibly running in FIPS mode')
return self.digest_from_file(filename, 'md5')
def sha1(self, filename):
''' Return SHA1 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha1')
def sha256(self, filename):
''' Return SHA-256 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha256')
def backup_local(self, fn):
'''make a date-marked backup of the specified file, return True or False on success or failure'''
backupdest = ''
if os.path.exists(fn):
# backups named basename-YYYY-MM-DD@HH:MM:SS~
ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
backupdest = '%s.%s' % (fn, ext)
try:
shutil.copy2(fn, backupdest)
except (shutil.Error, IOError):
e = get_exception()
self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, e))
return backupdest
def cleanup(self, tmpfile):
if os.path.exists(tmpfile):
try:
os.unlink(tmpfile)
except OSError:
e = get_exception()
sys.stderr.write("could not cleanup %s: %s" % (tmpfile, e))
def atomic_move(self, src, dest, unsafe_writes=False):
        '''atomically move src to dest, copying attributes from dest; returns True on success.
        It uses os.rename, which is an atomic operation; the rest of the function works
        around limitations and corner cases and preserves the selinux context if possible'''
context = None
dest_stat = None
if os.path.exists(dest):
try:
dest_stat = os.stat(dest)
os.chmod(src, dest_stat.st_mode & PERM_BITS)
os.chown(src, dest_stat.st_uid, dest_stat.st_gid)
except OSError:
e = get_exception()
if e.errno != errno.EPERM:
raise
if self.selinux_enabled():
context = self.selinux_context(dest)
else:
if self.selinux_enabled():
context = self.selinux_default_context(dest)
creating = not os.path.exists(dest)
try:
login_name = os.getlogin()
except OSError:
# not having a tty can cause the above to fail, so
# just get the LOGNAME environment variable instead
login_name = os.environ.get('LOGNAME', None)
# if the original login_name doesn't match the currently
# logged-in user, or if the SUDO_USER environment variable
# is set, then this user has switched their credentials
switched_user = login_name and login_name != pwd.getpwuid(os.getuid())[0] or os.environ.get('SUDO_USER')
try:
# Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
os.rename(src, dest)
except (IOError, OSError):
e = get_exception()
if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY]:
# only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied)
# and 26 (text file busy) which happens on vagrant synced folders and other 'exotic' non posix file systems
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e))
else:
dest_dir = os.path.dirname(dest)
dest_file = os.path.basename(dest)
try:
tmp_dest = tempfile.NamedTemporaryFile(
prefix=".ansible_tmp", dir=dest_dir, suffix=dest_file)
except (OSError, IOError):
e = get_exception()
self.fail_json(msg='The destination directory (%s) is not writable by the current user. Error was: %s' % (dest_dir, e))
try: # leaves tmp file behind when sudo and not root
if switched_user and os.getuid() != 0:
# cleanup will happen by 'rm' of tempdir
# copy2 will preserve some metadata
shutil.copy2(src, tmp_dest.name)
else:
shutil.move(src, tmp_dest.name)
if self.selinux_enabled():
self.set_context_if_different(
tmp_dest.name, context, False)
try:
tmp_stat = os.stat(tmp_dest.name)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(tmp_dest.name, dest_stat.st_uid, dest_stat.st_gid)
except OSError:
e = get_exception()
if e.errno != errno.EPERM:
raise
os.rename(tmp_dest.name, dest)
except (shutil.Error, OSError, IOError):
e = get_exception()
# sadly there are some situations where we cannot ensure atomicity, but only if
# the user insists and we get the appropriate error we update the file unsafely
if unsafe_writes and e.errno == errno.EBUSY:
                        #TODO: issue a warning that this is an unsafe operation, but do it because the user insists
try:
try:
out_dest = open(dest, 'wb')
in_src = open(src, 'rb')
shutil.copyfileobj(in_src, out_dest)
finally: # assuring closed files in 2.4 compatible way
if out_dest:
out_dest.close()
if in_src:
in_src.close()
except (shutil.Error, OSError, IOError):
e = get_exception()
self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, e))
else:
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e))
self.cleanup(tmp_dest.name)
if creating:
# make sure the file has the correct permissions
# based on the current value of umask
umask = os.umask(0)
os.umask(umask)
os.chmod(dest, DEFAULT_PERM & ~umask)
if switched_user:
os.chown(dest, os.getuid(), os.getgid())
if self.selinux_enabled():
# rename might not preserve context
self.set_context_if_different(dest, context, False)
def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False, prompt_regex=None, environ_update=None):
'''
Execute a command, returns rc, stdout, and stderr.
:arg args: is the command to run
* If args is a list, the command will be run with shell=False.
            * If args is a string and use_unsafe_shell=False it will be split into a list and run with shell=False
* If args is a string and use_unsafe_shell=True it runs with shell=True.
:kw check_rc: Whether to call fail_json in case of non zero RC.
Default False
:kw close_fds: See documentation for subprocess.Popen(). Default True
:kw executable: See documentation for subprocess.Popen(). Default None
:kw data: If given, information to write to the stdin of the command
:kw binary_data: If False, append a newline to the data. Default False
:kw path_prefix: If given, additional path to find the command in.
            This adds to the PATH environment variable so helper commands in
            the same directory can also be found
        :kw cwd: If given, working directory to run the command inside
:kw use_unsafe_shell: See `args` parameter. Default False
:kw prompt_regex: Regex string (not a compiled regex) which can be
used to detect prompts in the stdout which would otherwise cause
the execution to hang (especially if no input data is specified)
:kwarg environ_update: dictionary to *update* os.environ with
'''
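        # Minimal usage sketch (module instance and commands are hypothetical):
        #   rc, out, err = module.run_command(['/bin/ls', '-l', '/tmp'])
        #   rc, out, err = module.run_command('ls -l /tmp | wc -l', use_unsafe_shell=True)
        # With a list (and use_unsafe_shell=False) each element is expanded with
        # os.path.expandvars/expanduser and passed to subprocess.Popen with shell=False;
        # with a plain string and use_unsafe_shell=False the string is shlex.split() first.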
shell = False
if isinstance(args, list):
if use_unsafe_shell:
args = " ".join([pipes.quote(x) for x in args])
shell = True
elif isinstance(args, basestring) and use_unsafe_shell:
shell = True
elif isinstance(args, string_types):
# On python2.6 and below, shlex has problems with text type
# On python3, shlex needs a text type.
if PY2 and isinstance(args, text_type):
args = args.encode('utf-8')
elif PY3 and isinstance(args, binary_type):
args = args.decode('utf-8', errors='surrogateescape')
args = shlex.split(args)
else:
msg = "Argument 'args' to run_command must be list or string"
self.fail_json(rc=257, cmd=args, msg=msg)
prompt_re = None
if prompt_regex:
if isinstance(prompt_regex, text_type):
if PY3:
prompt_regex = prompt_regex.encode('utf-8', errors='surrogateescape')
elif PY2:
prompt_regex = prompt_regex.encode('utf-8')
try:
prompt_re = re.compile(prompt_regex, re.MULTILINE)
except re.error:
self.fail_json(msg="invalid prompt regular expression given to run_command")
# expand things like $HOME and ~
if not shell:
args = [ os.path.expandvars(os.path.expanduser(x)) for x in args if x is not None ]
rc = 0
msg = None
st_in = None
# Manipulate the environ we'll send to the new process
old_env_vals = {}
# We can set this from both an attribute and per call
for key, val in self.run_command_environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if environ_update:
for key, val in environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if path_prefix:
old_env_vals['PATH'] = os.environ['PATH']
os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])
# If using test-module and explode, the remote lib path will resemble ...
# /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
# If using ansible or ansible-playbook with a remote system ...
# /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py
# Clean out python paths set by ziploader
if 'PYTHONPATH' in os.environ:
pypaths = os.environ['PYTHONPATH'].split(':')
pypaths = [x for x in pypaths \
if not x.endswith('/ansible_modlib.zip') \
and not x.endswith('/debug_dir')]
os.environ['PYTHONPATH'] = ':'.join(pypaths)
if not os.environ['PYTHONPATH']:
del os.environ['PYTHONPATH']
# create a printable version of the command for use
# in reporting later, which strips out things like
# passwords from the args list
to_clean_args = args
if PY2:
if isinstance(args, text_type):
to_clean_args = args.encode('utf-8')
else:
if isinstance(args, binary_type):
to_clean_args = args.decode('utf-8', errors='replace')
if isinstance(args, (text_type, binary_type)):
to_clean_args = shlex.split(to_clean_args)
clean_args = []
is_passwd = False
for arg in to_clean_args:
if is_passwd:
is_passwd = False
clean_args.append('********')
continue
if PASSWD_ARG_RE.match(arg):
sep_idx = arg.find('=')
if sep_idx > -1:
clean_args.append('%s=********' % arg[:sep_idx])
continue
else:
is_passwd = True
arg = heuristic_log_sanitize(arg, self.no_log_values)
clean_args.append(arg)
clean_args = ' '.join(pipes.quote(arg) for arg in clean_args)
if data:
st_in = subprocess.PIPE
kwargs = dict(
executable=executable,
shell=shell,
close_fds=close_fds,
stdin=st_in,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
if cwd and os.path.isdir(cwd):
kwargs['cwd'] = cwd
# store the pwd
prev_dir = os.getcwd()
# make sure we're in the right working directory
if cwd and os.path.isdir(cwd):
try:
os.chdir(cwd)
except (OSError, IOError):
e = get_exception()
self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, str(e)))
try:
if self._debug:
if isinstance(args, list):
running = ' '.join(args)
else:
running = args
self.log('Executing: ' + running)
cmd = subprocess.Popen(args, **kwargs)
# the communication logic here is essentially taken from that
# of the _communicate() function in ssh.py
stdout = b('')
stderr = b('')
rpipes = [cmd.stdout, cmd.stderr]
if data:
if not binary_data:
data += '\n'
if isinstance(data, text_type):
if PY3:
errors = 'surrogateescape'
else:
errors = 'strict'
data = data.encode('utf-8', errors=errors)
cmd.stdin.write(data)
cmd.stdin.close()
while True:
rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
if cmd.stdout in rfd:
dat = os.read(cmd.stdout.fileno(), 9000)
stdout += dat
if dat == b(''):
rpipes.remove(cmd.stdout)
if cmd.stderr in rfd:
dat = os.read(cmd.stderr.fileno(), 9000)
stderr += dat
if dat == b(''):
rpipes.remove(cmd.stderr)
# if we're checking for prompts, do it now
if prompt_re:
if prompt_re.search(stdout) and not data:
return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
# only break out if no pipes are left to read or
# the pipes are completely read and
# the process is terminated
if (not rpipes or not rfd) and cmd.poll() is not None:
break
# No pipes are left to read but process is not yet terminated
# Only then it is safe to wait for the process to be finished
# NOTE: Actually cmd.poll() is always None here if rpipes is empty
elif not rpipes and cmd.poll() == None:
cmd.wait()
# The process is terminated. Since no pipes to read from are
# left, there is no need to call select() again.
break
cmd.stdout.close()
cmd.stderr.close()
rc = cmd.returncode
except (OSError, IOError):
e = get_exception()
self.fail_json(rc=e.errno, msg=str(e), cmd=clean_args)
except:
self.fail_json(rc=257, msg=traceback.format_exc(), cmd=clean_args)
# Restore env settings
for key, val in old_env_vals.items():
if val is None:
del os.environ[key]
else:
os.environ[key] = val
if rc != 0 and check_rc:
msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
self.fail_json(cmd=clean_args, rc=rc, stdout=stdout, stderr=stderr, msg=msg)
# reset the pwd
os.chdir(prev_dir)
return (rc, stdout, stderr)
def append_to_file(self, filename, str):
filename = os.path.expandvars(os.path.expanduser(filename))
fh = open(filename, 'a')
fh.write(str)
fh.close()
def pretty_bytes(self,size):
ranges = (
(1<<70, 'ZB'),
(1<<60, 'EB'),
(1<<50, 'PB'),
(1<<40, 'TB'),
(1<<30, 'GB'),
(1<<20, 'MB'),
(1<<10, 'KB'),
(1, 'Bytes')
)
for limit, suffix in ranges:
if size >= limit:
break
return '%.2f %s' % (float(size)/ limit, suffix)
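        # Rough sketch of the expected formatting (values are illustrative):
        #   pretty_bytes(1024)          -> '1.00 KB'
        #   pretty_bytes(1536)          -> '1.50 KB'
        #   pretty_bytes(5 * (1 << 30)) -> '5.00 GB'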
#
# Backwards compat
#
# In 2.0, moved from inside the module to the toplevel
is_executable = is_executable
def get_module_path():
return os.path.dirname(os.path.realpath(__file__))
|
supertom/ansible
|
lib/ansible/module_utils/basic.py
|
Python
|
gpl-3.0
| 85,059
|
[
"VisIt"
] |
69fb4d3b8e552715702cfe3ad850a385f0d82cb2cc01db10d4832d5928d4bf32
|
#!/usr/bin/env python
"""
Originally written by Kelly Vincent
pretty output and additional picard wrappers by Ross Lazarus for rgenetics
Runs all available wrapped Picard tools.
usage: picard_wrapper.py [options]
code Ross wrote licensed under the LGPL
see http://www.gnu.org/copyleft/lesser.html
"""
import optparse, os, sys, subprocess, tempfile, shutil, time, logging
galhtmlprefix = """<?xml version="1.0" encoding="utf-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="generator" content="Galaxy %s tool output - see http://getgalaxy.org/" />
<title></title>
<link rel="stylesheet" href="/static/style/base.css" type="text/css" />
</head>
<body>
<div class="document">
"""
galhtmlattr = """Galaxy tool %s run at %s</b><br/>"""
galhtmlpostfix = """</div></body></html>\n"""
def stop_err( msg ):
sys.stderr.write( '%s\n' % msg )
sys.exit()
def timenow():
"""return current time as a string
"""
return time.strftime('%d/%m/%Y %H:%M:%S', time.localtime(time.time()))
class PicardBase():
"""
simple base class with some utilities for Picard
adapted and merged with Kelly Vincent's code april 2011 Ross
lots of changes...
"""
def __init__(self, opts=None,arg0=None):
""" common stuff needed at init for a picard tool
"""
assert opts <> None, 'PicardBase needs opts at init'
self.opts = opts
if self.opts.outdir == None:
self.opts.outdir = os.getcwd() # fixmate has no html file eg so use temp dir
assert self.opts.outdir <> None,'## PicardBase needs a temp directory if no output directory passed in'
self.picname = self.baseName(opts.jar)
if self.picname.startswith('picard'):
self.picname = opts.picard_cmd # special case for some tools like replaceheader?
self.progname = self.baseName(arg0)
self.version = '0.002'
self.delme = [] # list of files to destroy
self.title = opts.title
self.inputfile = opts.input
try:
os.makedirs(opts.outdir)
except:
pass
try:
os.makedirs(opts.tmpdir)
except:
pass
self.log_filename = os.path.join(self.opts.outdir,'%s.log' % self.picname)
self.metricsOut = os.path.join(opts.outdir,'%s.metrics.txt' % self.picname)
self.setLogging(logfname=self.log_filename)
def baseName(self,name=None):
return os.path.splitext(os.path.basename(name))[0]
def setLogging(self,logfname="picard_wrapper.log"):
"""setup a logger
"""
logging.basicConfig(level=logging.INFO,
filename=logfname,
filemode='a')
def readLarge(self,fname=None):
""" read a potentially huge file.
"""
try:
# get stderr, allowing for case where it's very large
tmp = open( fname, 'rb' )
s = ''
buffsize = 1048576
try:
while True:
more = tmp.read( buffsize )
if len(more) > 0:
s += more
else:
break
except OverflowError:
pass
tmp.close()
except Exception, e:
stop_err( 'Read Large Exception : %s' % str( e ) )
return s
def runCL(self,cl=None,output_dir=None):
""" construct and run a command line
we have galaxy's temp path as opt.temp_dir so don't really need isolation
sometimes stdout is needed as the output - ugly hacks to deal with potentially vast artifacts
"""
assert cl <> None, 'PicardBase runCL needs a command line as cl'
if output_dir == None:
output_dir = self.opts.outdir
if type(cl) == type([]):
cl = ' '.join(cl)
fd,templog = tempfile.mkstemp(dir=output_dir,suffix='rgtempRun.txt')
tlf = open(templog,'wb')
fd,temperr = tempfile.mkstemp(dir=output_dir,suffix='rgtempErr.txt')
tef = open(temperr,'wb')
process = subprocess.Popen(cl, shell=True, stderr=tef, stdout=tlf, cwd=output_dir)
rval = process.wait()
tlf.close()
tef.close()
stderrs = self.readLarge(temperr)
stdouts = self.readLarge(templog)
if rval > 0:
s = '## executing %s returned status %d and stderr: \n%s\n' % (cl,rval,stderrs)
stdouts = '%s\n%s' % (stdouts,stderrs)
else:
s = '## executing %s returned status %d and nothing on stderr\n' % (cl,rval)
logging.info(s)
os.unlink(templog) # always
os.unlink(temperr) # always
return s, stdouts, rval # sometimes s is an output
def runPic(self, jar, cl):
"""
cl should be everything after the jar file name in the command
"""
runme = ['java -Xmx%s' % self.opts.maxjheap]
runme.append(" -Djava.io.tmpdir='%s' " % self.opts.tmpdir)
runme.append('-jar %s' % jar)
runme += cl
s,stdouts,rval = self.runCL(cl=runme, output_dir=self.opts.outdir)
return stdouts,rval
def samToBam(self,infile=None,outdir=None):
"""
use samtools view to convert sam to bam
"""
fd,tempbam = tempfile.mkstemp(dir=outdir,suffix='rgutilsTemp.bam')
cl = ['samtools view -h -b -S -o ',tempbam,infile]
tlog,stdouts,rval = self.runCL(cl,outdir)
return tlog,tempbam,rval
def sortSam(self, infile=None,outfile=None,outdir=None):
"""
"""
print '## sortSam got infile=%s,outfile=%s,outdir=%s' % (infile,outfile,outdir)
cl = ['samtools sort',infile,outfile]
tlog,stdouts,rval = self.runCL(cl,outdir)
return tlog
def cleanup(self):
for fname in self.delme:
try:
os.unlink(fname)
except:
pass
def prettyPicout(self,transpose,maxrows):
"""organize picard outpouts into a report html page
"""
res = []
try:
r = open(self.metricsOut,'r').readlines()
except:
r = []
if len(r) > 0:
            res.append('<b>Picard online resources</b><ul>\n')
res.append('<li><a href="http://picard.sourceforge.net/index.shtml">Click here for Picard Documentation</a></li>\n')
res.append('<li><a href="http://picard.sourceforge.net/picard-metric-definitions.shtml">Click here for Picard Metrics definitions</a></li></ul><hr/>\n')
if transpose:
res.append('<b>Picard output (transposed to make it easier to see)</b><hr/>\n')
else:
res.append('<b>Picard output</b><hr/>\n')
res.append('<table cellpadding="3" >\n')
dat = []
heads = []
lastr = len(r) - 1
# special case for estimate library complexity hist
thist = False
for i,row in enumerate(r):
if row.strip() > '':
srow = row.split('\t')
if row.startswith('#'):
heads.append(row.strip()) # want strings
else:
dat.append(srow) # want lists
if row.startswith('## HISTOGRAM'):
thist = True
if len(heads) > 0:
hres = ['<tr class="d%d"><td colspan="2">%s</td></tr>' % (i % 2,x) for i,x in enumerate(heads)]
res += hres
heads = []
if len(dat) > 0:
if transpose and not thist:
tdat = map(None,*dat) # transpose an arbitrary list of lists
tdat = ['<tr class="d%d"><td>%s</td><td>%s </td></tr>\n' % ((i+len(heads)) % 2,x[0],x[1]) for i,x in enumerate(tdat)]
else:
tdat = ['\t'.join(x).strip() for x in dat] # back to strings :(
tdat = ['<tr class="d%d"><td colspan="2">%s</td></tr>\n' % ((i+len(heads)) % 2,x) for i,x in enumerate(tdat)]
res += tdat
dat = []
res.append('</table>\n')
return res
def fixPicardOutputs(self,transpose,maxloglines):
"""
picard produces long hard to read tab header files
make them available but present them transposed for readability
"""
logging.shutdown()
self.cleanup() # remove temp files stored in delme
rstyle="""<style type="text/css">
tr.d0 td {background-color: oldlace; color: black;}
tr.d1 td {background-color: aliceblue; color: black;}
</style>"""
res = [rstyle,]
res.append(galhtmlprefix % self.progname)
res.append(galhtmlattr % (self.picname,timenow()))
flist = [x for x in os.listdir(self.opts.outdir) if not x.startswith('.')]
pdflist = [x for x in flist if os.path.splitext(x)[-1].lower() == '.pdf']
if len(pdflist) > 0: # assumes all pdfs come with thumbnail .jpgs
for p in pdflist:
pbase = os.path.splitext(p)[0] # removes .pdf
imghref = '%s.jpg' % pbase
mimghref = '%s-0.jpg' % pbase # multiple pages pdf -> multiple thumbnails without asking!
if mimghref in flist:
imghref=mimghref # only one for thumbnail...it's a multi page pdf
res.append('<table cellpadding="10"><tr><td>\n')
res.append('<a href="%s"><img src="%s" title="Click image preview for a print quality PDF version" hspace="10" align="middle"></a>\n' % (p,imghref))
res.append('</tr></td></table>\n')
if len(flist) > 0:
res.append('<b>The following output files were created (click the filename to view/download a copy):</b><hr/>')
res.append('<table>\n')
for i,f in enumerate(flist):
fn = os.path.split(f)[-1]
res.append('<tr><td><a href="%s">%s</a></td></tr>\n' % (fn,fn))
res.append('</table><p/>\n')
pres = self.prettyPicout(transpose,maxloglines)
if len(pres) > 0:
res += pres
l = open(self.log_filename,'r').readlines()
llen = len(l)
if llen > 0:
res.append('<b>Picard Tool Run Log</b><hr/>\n')
rlog = ['<pre>',]
if llen > maxloglines:
n = min(50,int(maxloglines/2))
rlog += l[:n]
rlog.append('------------ ## %d rows deleted ## --------------\n' % (llen-maxloglines))
rlog += l[-n:]
else:
rlog += l
rlog.append('</pre>')
if llen > maxloglines:
rlog.append('\n<b>## WARNING - %d log lines truncated - <a href="%s">%s</a> contains entire output</b>' % (llen - maxloglines,self.log_filename,self.log_filename))
res += rlog
else:
res.append("### Odd, Picard left no log file %s - must have really barfed badly?\n" % self.log_filename)
res.append('<hr/>The freely available <a href="http://picard.sourceforge.net/command-line-overview.shtml">Picard software</a> \n')
res.append( 'generated all outputs reported here running as a <a href="http://getgalaxy.org">Galaxy</a> tool')
res.append(galhtmlpostfix)
outf = open(self.opts.htmlout,'w')
outf.write(''.join(res))
outf.write('\n')
outf.close()
def makePicInterval(self,inbed=None,outf=None):
"""
picard wants bait and target files to have the same header length as the incoming bam/sam
        a meaningful (i.e. accurate) representation will fail because of this - hence this hack
it would be far better to be able to supply the original bed untouched
Additional checking added Ross Lazarus Dec 2011 to deal with two 'bug' reports on the list
"""
assert inbed <> None
bed = open(inbed,'r').readlines()
sbed = [x.split('\t') for x in bed] # lengths MUST be 5
lens = [len(x) for x in sbed]
strands = [x[3] for x in sbed if not x[3] in ['+','-']]
maxl = max(lens)
minl = min(lens)
e = []
if maxl <> minl:
e.append("## Input error: Inconsistent field count in %s - please read the documentation on bait/target format requirements, fix and try again" % inbed)
if maxl <> 5:
e.append("## Input error: %d fields found in %s, 5 required - please read the warning and documentation on bait/target format requirements, fix and try again" % (maxl,inbed))
if len(strands) > 0:
e.append("## Input error: Fourth column in %s is not the required strand (+ or -) - please read the warning and documentation on bait/target format requirements, fix and try again" % (inbed))
if len(e) > 0: # write to stderr and quit
print >> sys.stderr, '\n'.join(e)
sys.exit(1)
thead = os.path.join(self.opts.outdir,'tempSamHead.txt')
if self.opts.datatype == 'sam':
cl = ['samtools view -H -S',self.opts.input,'>',thead]
else:
cl = ['samtools view -H',self.opts.input,'>',thead]
self.runCL(cl=cl,output_dir=self.opts.outdir)
head = open(thead,'r').readlines()
s = '## got %d rows of header\n' % (len(head))
logging.info(s)
o = open(outf,'w')
o.write(''.join(head))
o.write(''.join(bed))
o.close()
return outf
def cleanSam(self, insam=None, newsam=None, picardErrors=[],outformat=None):
"""
interesting problem - if paired, must remove mate pair of errors too or we have a new set of errors after cleaning - missing mate pairs!
Do the work of removing all the error sequences
pysam is cool
infile = pysam.Samfile( "-", "r" )
outfile = pysam.Samfile( "-", "w", template = infile )
for s in infile: outfile.write(s)
errors from ValidateSameFile.jar look like
WARNING: Record 32, Read name SRR006041.1202260, NM tag (nucleotide differences) is missing
ERROR: Record 33, Read name SRR006041.1042721, Empty sequence dictionary.
ERROR: Record 33, Read name SRR006041.1042721, RG ID on SAMRecord not found in header: SRR006041
"""
assert os.path.isfile(insam), 'rgPicardValidate cleansam needs an input sam file - cannot find %s' % insam
assert newsam <> None, 'rgPicardValidate cleansam needs an output new sam file path'
removeNames = [x.split(',')[1].replace(' Read name ','') for x in picardErrors if len(x.split(',')) > 2]
remDict = dict(zip(removeNames,range(len(removeNames))))
infile = pysam.Samfile(insam,'rb')
info = 'found %d error sequences in picardErrors, %d unique' % (len(removeNames),len(remDict))
if len(removeNames) > 0:
outfile = pysam.Samfile(newsam,'wb',template=infile) # template must be an open file
i = 0
j = 0
for row in infile:
dropme = remDict.get(row.qname,None) # keep if None
if not dropme:
outfile.write(row)
j += 1
else: # discard
i += 1
info = '%s\n%s' % (info, 'Discarded %d lines writing %d to %s from %s' % (i,j,newsam,insam))
outfile.close()
infile.close()
else: # we really want a nullop or a simple pointer copy
infile.close()
if newsam:
shutil.copy(insam,newsam)
logging.info(info)
def __main__():
doFix = False # tools returning htmlfile don't need this
doTranspose = True # default
maxloglines = 100 # default
#Parse Command Line
op = optparse.OptionParser()
# All tools
op.add_option('-i', '--input', dest='input', help='Input SAM or BAM file' )
op.add_option('-e', '--inputext', default=None)
op.add_option('-o', '--output', default=None)
op.add_option('-n', '--title', default="Pick a Picard Tool")
op.add_option('-t', '--htmlout', default=None)
op.add_option('-d', '--outdir', default=None)
op.add_option('-x', '--maxjheap', default='4g')
op.add_option('-b', '--bisulphite', default='false')
op.add_option('-s', '--sortorder', default='query')
op.add_option('','--tmpdir', default='/tmp')
op.add_option('-j','--jar',default='')
op.add_option('','--picard-cmd',default=None)
# Many tools
op.add_option( '', '--output-format', dest='output_format', help='Output format' )
op.add_option( '', '--bai-file', dest='bai_file', help='The path to the index file for the input bam file' )
op.add_option( '', '--ref', dest='ref', help='Built-in reference with fasta and dict file', default=None )
# CreateSequenceDictionary
op.add_option( '', '--ref-file', dest='ref_file', help='Fasta to use as reference', default=None )
op.add_option( '', '--species-name', dest='species_name', help='Species name to use in creating dict file from fasta file' )
op.add_option( '', '--build-name', dest='build_name', help='Name of genome assembly to use in creating dict file from fasta file' )
op.add_option( '', '--trunc-names', dest='trunc_names', help='Truncate sequence names at first whitespace from fasta file' )
# MarkDuplicates
op.add_option( '', '--remdups', default='true', help='Remove duplicates from output file' )
op.add_option( '', '--optdupdist', default="100", help='Maximum pixels between two identical sequences in order to consider them optical duplicates.' )
# CollectInsertSizeMetrics
op.add_option('', '--taillimit', default="0")
op.add_option('', '--histwidth', default="0")
op.add_option('', '--minpct', default="0.01")
op.add_option('', '--malevel', default='')
op.add_option('', '--deviations', default="0.0")
# CollectAlignmentSummaryMetrics
op.add_option('', '--maxinsert', default="20")
op.add_option('', '--adaptors', default='')
# FixMateInformation and validate
# CollectGcBiasMetrics
op.add_option('', '--windowsize', default='100')
op.add_option('', '--mingenomefrac', default='0.00001')
# AddOrReplaceReadGroups
op.add_option( '', '--rg-opts', dest='rg_opts', help='Specify extra (optional) arguments with full, otherwise preSet' )
op.add_option( '', '--rg-lb', dest='rg_library', help='Read Group Library' )
op.add_option( '', '--rg-pl', dest='rg_platform', help='Read Group platform (e.g. illumina, solid)' )
op.add_option( '', '--rg-pu', dest='rg_plat_unit', help='Read Group platform unit (eg. run barcode) ' )
op.add_option( '', '--rg-sm', dest='rg_sample', help='Read Group sample name' )
op.add_option( '', '--rg-id', dest='rg_id', help='Read Group ID' )
op.add_option( '', '--rg-cn', dest='rg_seq_center', help='Read Group sequencing center name' )
op.add_option( '', '--rg-ds', dest='rg_desc', help='Read Group description' )
# ReorderSam
op.add_option( '', '--allow-inc-dict-concord', dest='allow_inc_dict_concord', help='Allow incomplete dict concordance' )
op.add_option( '', '--allow-contig-len-discord', dest='allow_contig_len_discord', help='Allow contig length discordance' )
# ReplaceSamHeader
op.add_option( '', '--header-file', dest='header_file', help='sam or bam file from which header will be read' )
op.add_option('','--assumesorted', default='true')
op.add_option('','--readregex', default="[a-zA-Z0-9]+:[0-9]:([0-9]+):([0-9]+):([0-9]+).*")
#estimatelibrarycomplexity
op.add_option('','--minid', default="5")
op.add_option('','--maxdiff', default="0.03")
op.add_option('','--minmeanq', default="20")
#hsmetrics
op.add_option('','--baitbed', default=None)
op.add_option('','--targetbed', default=None)
#validate
op.add_option('','--ignoreflags', action='append', type="string")
op.add_option('','--maxerrors', default=None)
op.add_option('','--datatype', default=None)
op.add_option('','--bamout', default=None)
op.add_option('','--samout', default=None)
opts, args = op.parse_args()
opts.sortme = opts.assumesorted == 'false'
assert opts.input <> None
# need to add
# instance that does all the work
pic = PicardBase(opts,sys.argv[0])
tmp_dir = opts.outdir
haveTempout = False # we use this where sam output is an option
rval = 0
stdouts = 'Not run yet'
# set ref and dict files to use (create if necessary)
ref_file_name = opts.ref
if opts.ref_file <> None:
csd = 'CreateSequenceDictionary'
realjarpath = os.path.split(opts.jar)[0]
jarpath = os.path.join(realjarpath,'%s.jar' % csd) # for refseq
tmp_ref_fd, tmp_ref_name = tempfile.mkstemp( dir=opts.tmpdir , prefix = pic.picname)
ref_file_name = '%s.fasta' % tmp_ref_name
# build dict
dict_file_name = '%s.dict' % tmp_ref_name
os.symlink( opts.ref_file, ref_file_name )
cl = ['REFERENCE=%s' % ref_file_name]
cl.append('OUTPUT=%s' % dict_file_name)
cl.append('URI=%s' % os.path.basename( opts.ref_file ))
cl.append('TRUNCATE_NAMES_AT_WHITESPACE=%s' % opts.trunc_names)
if opts.species_name:
cl.append('SPECIES=%s' % opts.species_name)
if opts.build_name:
cl.append('GENOME_ASSEMBLY=%s' % opts.build_name)
pic.delme.append(dict_file_name)
pic.delme.append(ref_file_name)
pic.delme.append(tmp_ref_name)
stdouts,rval = pic.runPic(jarpath, cl)
# run relevant command(s)
# define temporary output
# if output is sam, it must have that extension, otherwise bam will be produced
# specify sam or bam file with extension
if opts.output_format == 'sam':
suff = '.sam'
else:
suff = ''
tmp_fd, tempout = tempfile.mkstemp( dir=opts.tmpdir, suffix=suff )
#cl = ['VALIDATION_STRINGENCY=LENIENT',] #RK
cl = ['VALIDATION_STRINGENCY=SILENT',] #RK
if pic.picname == 'AddOrReplaceReadGroups':
# sort order to match Galaxy's default
cl.append('SORT_ORDER=coordinate')
# input
cl.append('INPUT=%s' % opts.input)
# outputs
cl.append('OUTPUT=%s' % tempout)
# required read groups
cl.append('RGLB="%s"' % opts.rg_library)
cl.append('RGPL="%s"' % opts.rg_platform)
cl.append('RGPU="%s"' % opts.rg_plat_unit)
cl.append('RGSM="%s"' % opts.rg_sample)
if opts.rg_id:
cl.append('RGID="%s"' % opts.rg_id)
# optional read groups
if opts.rg_seq_center:
cl.append('RGCN="%s"' % opts.rg_seq_center)
if opts.rg_desc:
cl.append('RGDS="%s"' % opts.rg_desc)
stdouts,rval = pic.runPic(opts.jar, cl)
haveTempout = True
elif pic.picname == 'BamIndexStats':
tmp_fd, tmp_name = tempfile.mkstemp( dir=tmp_dir )
tmp_bam_name = '%s.bam' % tmp_name
tmp_bai_name = '%s.bai' % tmp_bam_name
os.symlink( opts.input, tmp_bam_name )
os.symlink( opts.bai_file, tmp_bai_name )
cl.append('INPUT=%s' % ( tmp_bam_name ))
pic.delme.append(tmp_bam_name)
pic.delme.append(tmp_bai_name)
pic.delme.append(tmp_name)
stdouts,rval = pic.runPic( opts.jar, cl )
f = open(pic.metricsOut,'a')
f.write(stdouts) # got this on stdout from runCl
f.write('\n')
f.close()
doTranspose = False # but not transposed
elif pic.picname == 'EstimateLibraryComplexity':
cl.append('I=%s' % opts.input)
cl.append('O=%s' % pic.metricsOut)
if float(opts.minid) > 0:
cl.append('MIN_IDENTICAL_BASES=%s' % opts.minid)
if float(opts.maxdiff) > 0.0:
cl.append('MAX_DIFF_RATE=%s' % opts.maxdiff)
if float(opts.minmeanq) > 0:
cl.append('MIN_MEAN_QUALITY=%s' % opts.minmeanq)
if opts.readregex > '':
cl.append('READ_NAME_REGEX="%s"' % opts.readregex)
if float(opts.optdupdist) > 0:
cl.append('OPTICAL_DUPLICATE_PIXEL_DISTANCE=%s' % opts.optdupdist)
stdouts,rval = pic.runPic(opts.jar, cl)
elif pic.picname == 'CollectAlignmentSummaryMetrics':
# Why do we do this fakefasta thing?
        # Because we need NO .fai to be available, or picard barfs unless it matches the input data.
        # Why? Dunno. It seems to work without complaining if the .bai file is AWOL...
fakefasta = os.path.join(opts.outdir,'%s_fake.fasta' % os.path.basename(ref_file_name))
try:
os.symlink(ref_file_name,fakefasta)
except:
s = '## unable to symlink %s to %s - different devices? Will shutil.copy'
info = s
shutil.copy(ref_file_name,fakefasta)
pic.delme.append(fakefasta)
cl.append('ASSUME_SORTED=true')
adaptlist = opts.adaptors.split(',')
adaptorseqs = ['ADAPTER_SEQUENCE=%s' % x for x in adaptlist]
cl += adaptorseqs
cl.append('IS_BISULFITE_SEQUENCED=%s' % opts.bisulphite)
cl.append('MAX_INSERT_SIZE=%s' % opts.maxinsert)
cl.append('OUTPUT=%s' % pic.metricsOut)
cl.append('R=%s' % fakefasta)
cl.append('TMP_DIR=%s' % opts.tmpdir)
if not opts.assumesorted.lower() == 'true': # we need to sort input
sortedfile = '%s.sorted' % os.path.basename(opts.input)
if opts.datatype == 'sam': # need to work with a bam
tlog,tempbam,trval = pic.samToBam(opts.input,opts.outdir)
pic.delme.append(tempbam)
try:
tlog = pic.sortSam(tempbam,sortedfile,opts.outdir)
except:
print '## exception on sorting sam file %s' % opts.input
else: # is already bam
try:
tlog = pic.sortSam(opts.input,sortedfile,opts.outdir)
except : # bug - [bam_sort_core] not being ignored - TODO fixme
print '## exception %s on sorting bam file %s' % (sys.exc_info()[0],opts.input)
cl.append('INPUT=%s.bam' % os.path.abspath(os.path.join(opts.outdir,sortedfile)))
pic.delme.append(os.path.join(opts.outdir,sortedfile))
else:
cl.append('INPUT=%s' % os.path.abspath(opts.input))
stdouts,rval = pic.runPic(opts.jar, cl)
elif pic.picname == 'CollectGcBiasMetrics':
assert os.path.isfile(ref_file_name),'PicardGC needs a reference sequence - cannot read %s' % ref_file_name
# sigh. Why do we do this fakefasta thing? Because we need NO fai to be available or picard barfs unless it has the same length as the input data.
# why? Dunno
fakefasta = os.path.join(opts.outdir,'%s_fake.fasta' % os.path.basename(ref_file_name))
try:
os.symlink(ref_file_name,fakefasta)
except:
s = '## unable to symlink %s to %s - different devices? May need to replace with shutil.copy'
info = s
shutil.copy(ref_file_name,fakefasta)
pic.delme.append(fakefasta)
x = 'rgPicardGCBiasMetrics'
pdfname = '%s.pdf' % x
jpgname = '%s.jpg' % x
tempout = os.path.join(opts.outdir,'rgPicardGCBiasMetrics.out')
temppdf = os.path.join(opts.outdir,pdfname)
cl.append('R=%s' % fakefasta)
cl.append('WINDOW_SIZE=%s' % opts.windowsize)
cl.append('MINIMUM_GENOME_FRACTION=%s' % opts.mingenomefrac)
cl.append('INPUT=%s' % opts.input)
cl.append('OUTPUT=%s' % tempout)
cl.append('TMP_DIR=%s' % opts.tmpdir)
cl.append('CHART_OUTPUT=%s' % temppdf)
cl.append('SUMMARY_OUTPUT=%s' % pic.metricsOut)
stdouts,rval = pic.runPic(opts.jar, cl)
if os.path.isfile(temppdf):
cl2 = ['convert','-resize x400',temppdf,os.path.join(opts.outdir,jpgname)] # make the jpg for fixPicardOutputs to find
s,stdouts,rval = pic.runCL(cl=cl2,output_dir=opts.outdir)
else:
s='### runGC: Unable to find pdf %s - please check the log for the causal problem\n' % temppdf
lf = open(pic.log_filename,'a')
lf.write(s)
lf.write('\n')
lf.close()
elif pic.picname == 'CollectInsertSizeMetrics':
""" <command interpreter="python">
picard_wrapper.py -i "$input_file" -n "$out_prefix" --tmpdir "${__new_file_path__}" --deviations "$deviations"
--histwidth "$histWidth" --minpct "$minPct" --malevel "$malevel"
-j "${GALAXY_DATA_INDEX_DIR}/shared/jars/picard/CollectInsertSizeMetrics.jar" -d "$html_file.files_path" -t "$html_file"
</command>
"""
isPDF = 'InsertSizeHist.pdf'
pdfpath = os.path.join(opts.outdir,isPDF)
histpdf = 'InsertSizeHist.pdf'
cl.append('I=%s' % opts.input)
cl.append('O=%s' % pic.metricsOut)
cl.append('HISTOGRAM_FILE=%s' % histpdf)
#if opts.taillimit <> '0': # this was deprecated although still mentioned in the docs at 1.56
# cl.append('TAIL_LIMIT=%s' % opts.taillimit)
if opts.histwidth <> '0':
cl.append('HISTOGRAM_WIDTH=%s' % opts.histwidth)
if float( opts.minpct) > 0.0:
cl.append('MINIMUM_PCT=%s' % opts.minpct)
if float(opts.deviations) > 0.0:
cl.append('DEVIATIONS=%s' % opts.deviations)
if opts.malevel:
malists = opts.malevel.split(',')
malist = ['METRIC_ACCUMULATION_LEVEL=%s' % x for x in malists]
cl += malist
stdouts,rval = pic.runPic(opts.jar, cl)
if os.path.exists(pdfpath): # automake thumbnail - will be added to html
cl2 = ['mogrify', '-format jpg -resize x400 %s' % pdfpath]
pic.runCL(cl=cl2,output_dir=opts.outdir)
else:
s = 'Unable to find expected pdf file %s<br/>\n' % pdfpath
s += 'This <b>always happens if single ended data was provided</b> to this tool,\n'
s += 'so please double check that your input data really is paired-end NGS data.<br/>\n'
s += 'If your input was paired data this may be a bug worth reporting to the galaxy-bugs list\n<br/>'
logging.info(s)
if len(stdouts) > 0:
logging.info(stdouts)
elif pic.picname == 'MarkDuplicates':
# assume sorted even if header says otherwise
cl.append('ASSUME_SORTED=%s' % (opts.assumesorted))
# input
cl.append('INPUT=%s' % opts.input)
# outputs
cl.append('OUTPUT=%s' % opts.output)
cl.append('METRICS_FILE=%s' % pic.metricsOut )
# remove or mark duplicates
cl.append('REMOVE_DUPLICATES=%s' % opts.remdups)
# the regular expression to be used to parse reads in incoming SAM file
cl.append('READ_NAME_REGEX="%s"' % opts.readregex)
# maximum offset between two duplicate clusters
cl.append('OPTICAL_DUPLICATE_PIXEL_DISTANCE=%s' % opts.optdupdist)
# RK validation stringency
#cl.append('VALIDATION_STRINGENCY=SILENT') #RK - already declared in the wrapper
# RK max file handles for read ends map
cl.append('MAX_FILE_HANDLES_FOR_READ_ENDS_MAP=8000') #RK
# RK sorting collection size ratio
cl.append('SORTING_COLLECTION_SIZE_RATIO=0.25') #RK
# RK verbosity
cl.append('VERBOSITY=INFO') #RK
# RK quiet
cl.append('QUIET=false') #RK
# RK compression level
cl.append('COMPRESSION_LEVEL=5') #RK
# RK max records in ram
cl.append('MAX_RECORDS_IN_RAM=500000') #RK
# RK create index
cl.append('CREATE_INDEX=true') #RK
# RK create md5 file
cl.append('CREATE_MD5_FILE=false') #RK
stdouts,rval = pic.runPic(opts.jar, cl)
elif pic.picname == 'FixMateInformation':
cl.append('I=%s' % opts.input)
cl.append('O=%s' % tempout)
cl.append('SORT_ORDER=%s' % opts.sortorder)
stdouts,rval = pic.runPic(opts.jar,cl)
haveTempout = True
elif pic.picname == 'ReorderSam':
# input
cl.append('INPUT=%s' % opts.input)
# output
cl.append('OUTPUT=%s' % tempout)
# reference
cl.append('REFERENCE=%s' % ref_file_name)
# incomplete dict concordance
if opts.allow_inc_dict_concord == 'true':
cl.append('ALLOW_INCOMPLETE_DICT_CONCORDANCE=true')
# contig length discordance
if opts.allow_contig_len_discord == 'true':
cl.append('ALLOW_CONTIG_LENGTH_DISCORDANCE=true')
stdouts,rval = pic.runPic(opts.jar, cl)
haveTempout = True
elif pic.picname == 'ReplaceSamHeader':
cl.append('INPUT=%s' % opts.input)
cl.append('OUTPUT=%s' % tempout)
cl.append('HEADER=%s' % opts.header_file)
stdouts,rval = pic.runPic(opts.jar, cl)
haveTempout = True
elif pic.picname == 'CalculateHsMetrics':
maxloglines = 100
baitfname = os.path.join(opts.outdir,'rgPicardHsMetrics.bait')
targetfname = os.path.join(opts.outdir,'rgPicardHsMetrics.target')
baitf = pic.makePicInterval(opts.baitbed,baitfname)
if opts.targetbed == opts.baitbed: # same file sometimes
targetf = baitf
else:
targetf = pic.makePicInterval(opts.targetbed,targetfname)
cl.append('BAIT_INTERVALS=%s' % baitf)
cl.append('TARGET_INTERVALS=%s' % targetf)
cl.append('INPUT=%s' % os.path.abspath(opts.input))
cl.append('OUTPUT=%s' % pic.metricsOut)
cl.append('TMP_DIR=%s' % opts.tmpdir)
stdouts,rval = pic.runPic(opts.jar,cl)
elif pic.picname == 'ValidateSamFile':
import pysam
doTranspose = False
sortedfile = os.path.join(opts.outdir,'rgValidate.sorted')
stf = open(pic.log_filename,'w')
tlog = None
if opts.datatype == 'sam': # need to work with a bam
tlog,tempbam,rval = pic.samToBam(opts.input,opts.outdir)
try:
tlog = pic.sortSam(tempbam,sortedfile,opts.outdir)
except:
print '## exception on sorting sam file %s' % opts.input
else: # is already bam
try:
tlog = pic.sortSam(opts.input,sortedfile,opts.outdir)
except: # bug - [bam_sort_core] not being ignored - TODO fixme
print '## exception on sorting bam file %s' % opts.input
if tlog:
print '##tlog=',tlog
stf.write(tlog)
stf.write('\n')
sortedfile = '%s.bam' % sortedfile # samtools does that
cl.append('O=%s' % pic.metricsOut)
cl.append('TMP_DIR=%s' % opts.tmpdir)
cl.append('I=%s' % sortedfile)
opts.maxerrors = '99999999'
cl.append('MAX_OUTPUT=%s' % opts.maxerrors)
if opts.ignoreflags[0] <> 'None': # picard error values to ignore
igs = ['IGNORE=%s' % x for x in opts.ignoreflags if x <> 'None']
cl.append(' '.join(igs))
if opts.bisulphite.lower() <> 'false':
cl.append('IS_BISULFITE_SEQUENCED=true')
if opts.ref <> None or opts.ref_file <> None:
cl.append('R=%s' % ref_file_name)
stdouts,rval = pic.runPic(opts.jar,cl)
if opts.datatype == 'sam':
pic.delme.append(tempbam)
newsam = opts.output
outformat = 'bam'
pe = open(pic.metricsOut,'r').readlines()
pic.cleanSam(insam=sortedfile, newsam=newsam, picardErrors=pe,outformat=outformat)
pic.delme.append(sortedfile) # not wanted
stf.close()
pic.cleanup()
else:
print >> sys.stderr,'picard.py got an unknown tool name - %s' % pic.picname
sys.exit(1)
if haveTempout:
# Some Picard tools produced a potentially intermediate bam file.
# Either just move to final location or create sam
if os.path.exists(tempout):
shutil.move(tempout, os.path.abspath(opts.output))
if opts.htmlout <> None or doFix: # return a pretty html page
pic.fixPicardOutputs(transpose=doTranspose,maxloglines=maxloglines)
if rval <> 0:
print >> sys.stderr, '## exit code=%d; stdout=%s' % (rval,stdouts)
# signal failure
if __name__=="__main__": __main__()
|
jhl667/galaxy_tools
|
tools/picard/picard_wrapper_old.py
|
Python
|
apache-2.0
| 36,790
|
[
"Galaxy",
"pysam"
] |
42c540c300ed549bf18b20ab19d93703ca9bc9b0f23734f78bb39e032f4a88c3
|
from django import forms
from osp.visits.models import Visit
from django.conf import settings
class VisitForm(forms.ModelForm):
reason = forms.MultipleChoiceField(choices=settings.VISIT_REASON_CHOICES)
class Meta:
model = Visit
exclude = ('student', 'submitter',)
widgets = {
'private': forms.RadioSelect
}
def clean(self):
cleaned_data=self.cleaned_data
if cleaned_data.has_key('reason'):
# convert reason from list to string of comma separated values
try:
reason_separator = settings.VISIT_REASON_SEPARATOR
except:
reason_separator = '; '
reason_str = reason_separator.join(cleaned_data['reason'])
cleaned_data['reason'] = reason_str
return cleaned_data
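        # Illustrative behaviour of clean() (the choices are hypothetical):
        #   reason = ['advising', 'tutoring']  ->  cleaned_data['reason'] = 'advising; tutoring'
        # using settings.VISIT_REASON_SEPARATOR when it is defined, otherwise '; '.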
|
mariajosefrancolugo/osp
|
osp/visits/forms.py
|
Python
|
lgpl-3.0
| 831
|
[
"VisIt"
] |
da88d4754290f06c93e68abf10654afa658f7c0d5ce9f004d4986def613dc9cf
|
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
Transforms for CSEP processing.
- `Headers`: Used to transform a CSEP's initial RFC-2822 header. It remains a
field list, but some entries get processed.
- `Contents`: Auto-inserts a table of contents.
- `CSEPZero`: Special processing for CSEP 0.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import time
from docutils import nodes, utils, languages
from docutils import ApplicationError, DataError
from docutils.transforms import Transform, TransformError
from docutils.transforms import parts, references, misc
class Headers(Transform):
"""
Process fields in a CSEP's initial RFC-2822 header.
"""
default_priority = 360
csep_url = 'csep-%04d.html'
csep_cvs_url = ('http://cvs.sourceforge.net/cgi-bin/viewcvs.cgi/crystal/'
'CS/docs/cseps/csep-%04d.txt')
rcs_keyword_substitutions = (
(re.compile(r'\$' r'RCSfile: (.+),v \$$', re.IGNORECASE), r'\1'),
(re.compile(r'\$[a-zA-Z]+: (.+) \$$'), r'\1'),)
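    # Illustrative textual effect of the substitutions above (inputs are made up):
    #   '$RCSfile: csep-0001.txt,v $'  ->  'csep-0001.txt'
    #   '$Date: 2005/06/28 00:46:10 $' ->  '2005/06/28 00:46:10'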
def apply(self):
if not len(self.document):
# @@@ replace these DataErrors with proper system messages
raise DataError('Document tree is empty.')
header = self.document[0]
if not isinstance(header, nodes.field_list) or \
'rfc2822' not in header['classes']:
raise DataError('Document does not begin with an RFC-2822 '
'header; it is not a CSEP.')
csep = None
for field in header:
if field[0].astext().lower() == 'csep': # should be the first field
value = field[1].astext()
try:
csep = int(value)
cvs_url = self.csep_cvs_url % csep
except ValueError:
csep = value
cvs_url = None
msg = self.document.reporter.warning(
'"CSEP" header must contain an integer; "%s" is an '
'invalid value.' % csep, base_node=field)
msgid = self.document.set_id(msg)
prb = nodes.problematic(value, value or '(none)',
refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
if len(field[1]):
field[1][0][:] = [prb]
else:
field[1] += nodes.paragraph('', '', prb)
break
if csep is None:
raise DataError('Document does not contain an RFC-2822 "CSEP" '
'header.')
if csep == 0:
# Special processing for CSEP 0.
pending = nodes.pending(CSEPZero)
self.document.insert(1, pending)
self.document.note_pending(pending)
if len(header) < 2 or header[1][0].astext().lower() != 'title':
raise DataError('No title!')
for field in header:
name = field[0].astext().lower()
body = field[1]
if len(body) > 1:
raise DataError('CSEP header field body contains multiple '
'elements:\n%s' % field.pformat(level=1))
elif len(body) == 1:
if not isinstance(body[0], nodes.paragraph):
raise DataError('CSEP header field body may only contain '
'a single paragraph:\n%s'
% field.pformat(level=1))
elif name == 'last-modified':
date = time.strftime(
'%d-%b-%Y',
time.localtime(os.stat(self.document['source'])[8]))
if cvs_url:
body += nodes.paragraph(
'', '', nodes.reference('', date, refuri=cvs_url))
else:
# empty
continue
para = body[0]
if name == 'author':
for node in para:
if isinstance(node, nodes.reference):
node.parent.replace(node, mask_email(node))
elif name == 'discussions-to':
for node in para:
if isinstance(node, nodes.reference):
node.parent.replace(node, mask_email(node, csep))
elif name in ('replaces', 'replaced-by', 'requires'):
newbody = []
space = nodes.Text(' ')
for refcsep in re.split(',?\s+', body.astext()):
csepno = int(refcsep)
newbody.append(nodes.reference(
refcsep, refcsep,
refuri=(self.document.settings.csep_base_url
+ self.csep_url % csepno)))
newbody.append(space)
para[:] = newbody[:-1] # drop trailing space
elif name == 'last-modified':
utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions)
if cvs_url:
date = para.astext()
para[:] = [nodes.reference('', date, refuri=cvs_url)]
elif name == 'content-type':
csep_type = para.astext()
uri = self.document.settings.csep_base_url + self.csep_url % 03
para[:] = [nodes.reference('', csep_type, refuri=uri)]
elif name == 'version' and len(body):
utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions)
class Contents(Transform):
"""
Insert an empty table of contents topic and a transform placeholder into
the document after the RFC 2822 header.
"""
default_priority = 380
def apply(self):
language = languages.get_language(self.document.settings.language_code)
name = language.labels['contents']
title = nodes.title('', name)
topic = nodes.topic('', title, classes=['contents'])
name = nodes.fully_normalize_name(name)
if not self.document.has_name(name):
topic['names'].append(name)
self.document.note_implicit_target(topic)
pending = nodes.pending(parts.Contents)
topic += pending
self.document.insert(1, topic)
self.document.note_pending(pending)
class TargetNotes(Transform):
"""
Locate the "References" section, insert a placeholder for an external
target footnote insertion transform at the end, and schedule the
transform to run immediately.
"""
default_priority = 520
def apply(self):
doc = self.document
i = len(doc) - 1
refsect = copyright = None
while i >= 0 and isinstance(doc[i], nodes.section):
title_words = doc[i][0].astext().lower().split()
if 'references' in title_words:
refsect = doc[i]
break
elif 'copyright' in title_words:
copyright = i
i -= 1
if not refsect:
refsect = nodes.section()
refsect += nodes.title('', 'References')
doc.set_id(refsect)
if copyright:
# Put the new "References" section before "Copyright":
doc.insert(copyright, refsect)
else:
# Put the new "References" section at end of doc:
doc.append(refsect)
pending = nodes.pending(references.TargetNotes)
refsect.append(pending)
self.document.note_pending(pending, 0)
pending = nodes.pending(misc.CallBack,
details={'callback': self.cleanup_callback})
refsect.append(pending)
self.document.note_pending(pending, 1)
def cleanup_callback(self, pending):
"""
Remove an empty "References" section.
Called after the `references.TargetNotes` transform is complete.
"""
if len(pending.parent) == 2: # <title> and <pending>
pending.parent.parent.remove(pending.parent)
class CSEPZero(Transform):
"""
Special processing for CSEP 0.
"""
    default_priority = 760
def apply(self):
visitor = CSEPZeroSpecial(self.document)
self.document.walk(visitor)
self.startnode.parent.remove(self.startnode)
class CSEPZeroSpecial(nodes.SparseNodeVisitor):
"""
Perform the special processing needed by CSEP 0:
- Mask email addresses.
- Link CSEP numbers in the second column of 4-column tables to the CSEPs
themselves.
"""
csep_url = Headers.csep_url
def unknown_visit(self, node):
pass
def visit_reference(self, node):
node.parent.replace(node, mask_email(node))
def visit_field_list(self, node):
if 'rfc2822' in node['classes']:
raise nodes.SkipNode
def visit_tgroup(self, node):
self.csep_table = node['cols'] == 4
self.entry = 0
def visit_colspec(self, node):
self.entry += 1
if self.csep_table and self.entry == 2:
node['classes'].append('num')
def visit_row(self, node):
self.entry = 0
def visit_entry(self, node):
self.entry += 1
if self.csep_table and self.entry == 2 and len(node) == 1:
node['classes'].append('num')
p = node[0]
if isinstance(p, nodes.paragraph) and len(p) == 1:
text = p.astext()
try:
csep = int(text)
ref = (self.document.settings.csep_base_url
+ self.csep_url % csep)
p[0] = nodes.reference(text, text, refuri=ref)
except ValueError:
pass
non_masked_addresses = ('cseps@python.org',
'python-list@python.org',
'python-dev@python.org')
def mask_email(ref, csepno=None):
"""
Mask the email address in `ref` and return a replacement node.
`ref` is returned unchanged if it contains no email address.
For email addresses such as "user@host", mask the address as "user at
host" (text) to thwart simple email address harvesters (except for those
listed in `non_masked_addresses`). If a CSEP number (`csepno`) is given,
return a reference including a default email subject.
"""
if ref.hasattr('refuri') and ref['refuri'].startswith('mailto:'):
        if ref['refuri'][7:] in non_masked_addresses:  # 'mailto:' is 7 characters long
replacement = ref[0]
else:
replacement_text = ref.astext().replace('@', ' at ')
replacement = nodes.raw('', replacement_text, format='html')
if csepno is None:
return replacement
else:
ref['refuri'] += '?subject=CSEP%%20%s' % csepno
ref[:] = [replacement]
return ref
else:
return ref
|
crystalspace/CS
|
docs/support/cseptools/cseps.py
|
Python
|
lgpl-2.1
| 11,118
|
[
"CRYSTAL"
] |
8a464bec42f303e973254811c0c4630de56143b3f4665fed3251d4a1a99cf442
|
"""Validate dependencies."""
from __future__ import annotations
import ast
from pathlib import Path
from homeassistant.const import Platform
from homeassistant.requirements import DISCOVERY_INTEGRATIONS
from .model import Integration
class ImportCollector(ast.NodeVisitor):
"""Collect all integrations referenced."""
def __init__(self, integration: Integration):
"""Initialize the import collector."""
self.integration = integration
self.referenced: dict[Path, set[str]] = {}
# Current file or dir we're inspecting
self._cur_fil_dir = None
def collect(self) -> None:
"""Collect imports from a source file."""
for fil in self.integration.path.glob("**/*.py"):
if not fil.is_file():
continue
self._cur_fil_dir = fil.relative_to(self.integration.path)
self.referenced[self._cur_fil_dir] = set()
self.visit(ast.parse(fil.read_text()))
self._cur_fil_dir = None
def _add_reference(self, reference_domain: str):
"""Add a reference."""
self.referenced[self._cur_fil_dir].add(reference_domain)
def visit_ImportFrom(self, node):
"""Visit ImportFrom node."""
if node.module is None:
return
# Exception: we will allow importing the sign path code.
if (
node.module == "homeassistant.components.http.auth"
and len(node.names) == 1
and node.names[0].name == "async_sign_path"
):
return
if node.module.startswith("homeassistant.components."):
# from homeassistant.components.alexa.smart_home import EVENT_ALEXA_SMART_HOME
# from homeassistant.components.logbook import bla
self._add_reference(node.module.split(".")[2])
elif node.module == "homeassistant.components":
# from homeassistant.components import sun
for name_node in node.names:
self._add_reference(name_node.name)
def visit_Import(self, node):
"""Visit Import node."""
# import homeassistant.components.hue as hue
for name_node in node.names:
if name_node.name.startswith("homeassistant.components."):
self._add_reference(name_node.name.split(".")[2])
def visit_Attribute(self, node):
"""Visit Attribute node."""
# hass.components.hue.async_create()
# Name(id=hass)
# .Attribute(attr=hue)
# .Attribute(attr=async_create)
# self.hass.components.hue.async_create()
# Name(id=self)
# .Attribute(attr=hass) or .Attribute(attr=_hass)
# .Attribute(attr=hue)
# .Attribute(attr=async_create)
if (
isinstance(node.value, ast.Attribute)
and node.value.attr == "components"
and (
(
isinstance(node.value.value, ast.Name)
and node.value.value.id == "hass"
)
or (
isinstance(node.value.value, ast.Attribute)
and node.value.value.attr in ("hass", "_hass")
)
)
):
self._add_reference(node.attr)
else:
# Have it visit other kids
self.generic_visit(node)
ALLOWED_USED_COMPONENTS = {
*{platform.value for platform in Platform},
# Internal integrations
"alert",
"automation",
"conversation",
"device_automation",
"frontend",
"group",
"hassio",
"homeassistant",
"input_boolean",
"input_button",
"input_datetime",
"input_number",
"input_select",
"input_text",
"media_source",
"onboarding",
"persistent_notification",
"person",
"script",
"shopping_list",
"sun",
"system_health",
"system_log",
"timer",
"webhook",
"websocket_api",
"zone",
# Other
"mjpeg", # base class, has no reqs or component to load.
"stream", # Stream cannot install on all systems, can be imported without reqs.
}
IGNORE_VIOLATIONS = {
# Has same requirement, gets defaults.
("sql", "recorder"),
# Sharing a base class
("openalpr_cloud", "openalpr_local"),
("lutron_caseta", "lutron"),
("ffmpeg_noise", "ffmpeg_motion"),
# Demo
("demo", "manual"),
("demo", "openalpr_local"),
# This would be a circular dep
("http", "network"),
# This should become a helper method that integrations can submit data to
("websocket_api", "lovelace"),
("websocket_api", "shopping_list"),
"logbook",
# Migration wizard from zwave to zwave_js.
"zwave_js",
}
def calc_allowed_references(integration: Integration) -> set[str]:
"""Return a set of allowed references."""
allowed_references = (
ALLOWED_USED_COMPONENTS
| set(integration.manifest.get("dependencies", []))
| set(integration.manifest.get("after_dependencies", []))
)
# Discovery requirements are ok if referenced in manifest
for check_domain, to_check in DISCOVERY_INTEGRATIONS.items():
if any(check in integration.manifest for check in to_check):
allowed_references.add(check_domain)
return allowed_references
def find_non_referenced_integrations(
integrations: dict[str, Integration],
integration: Integration,
references: dict[Path, set[str]],
):
"""Find intergrations that are not allowed to be referenced."""
allowed_references = calc_allowed_references(integration)
referenced = set()
for path, refs in references.items():
if len(path.parts) == 1:
# climate.py is stored as climate
cur_fil_dir = path.stem
else:
# climate/__init__.py is stored as climate
cur_fil_dir = path.parts[0]
is_platform_other_integration = cur_fil_dir in integrations
for ref in refs:
# We are always allowed to import from ourselves
if ref == integration.domain:
continue
# These references are approved based on the manifest
if ref in allowed_references:
continue
# Some violations are whitelisted
if (integration.domain, ref) in IGNORE_VIOLATIONS:
continue
# If it's a platform for another integration, the other integration is ok
if is_platform_other_integration and cur_fil_dir == ref:
continue
# These have a platform specified in this integration
if not is_platform_other_integration and (
(integration.path / f"{ref}.py").is_file()
# Platform dir
or (integration.path / ref).is_dir()
):
continue
referenced.add(ref)
return referenced
def validate_dependencies(
integrations: dict[str, Integration], integration: Integration
):
"""Validate all dependencies."""
# Some integrations are allowed to have violations.
if integration.domain in IGNORE_VIOLATIONS:
return
# Find usage of hass.components
collector = ImportCollector(integration)
collector.collect()
for domain in sorted(
find_non_referenced_integrations(
integrations, integration, collector.referenced
)
):
integration.add_error(
"dependencies",
f"Using component {domain} but it's not in 'dependencies' "
"or 'after_dependencies'",
)
def validate(integrations: dict[str, Integration], config):
"""Handle dependencies for integrations."""
# check for non-existing dependencies
for integration in integrations.values():
if not integration.manifest:
continue
validate_dependencies(integrations, integration)
if config.specific_integrations:
continue
# check that all referenced dependencies exist
after_deps = integration.manifest.get("after_dependencies", [])
for dep in integration.manifest.get("dependencies", []):
if dep in after_deps:
integration.add_error(
"dependencies",
f"Dependency {dep} is both in dependencies and after_dependencies",
)
if dep not in integrations:
integration.add_error(
"dependencies", f"Dependency {dep} does not exist"
)
|
rohitranjan1991/home-assistant
|
script/hassfest/dependencies.py
|
Python
|
mit
| 8,544
|
[
"VisIt"
] |
52661824421e4e264349b29e94910d762fe99d796a53f7ce427e2aa7757f78e4
|
import re
import sys
from setlx2py.setlx_ast import *
from setlx2py.setlx_parser import Parser
TAG_INTERPOLATION = "KLIENTERPOLATION"
class AstTransformer(NodeVisitor):
def visit(self, node, parent=None):
""" Visit a node.
"""
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node, parent)
def generic_visit(self, node, parent):
""" Called if no explicit visitor function exists for a
node. Implements preorder visiting of the node.
"""
for c_name, c in node.children():
self.visit(c, node)
def visit_FileAST(self, n, p):
for i, stmt in enumerate(n.stmts):
if isinstance(stmt, Assignment) and isinstance(stmt.right, Procedure):
assignment = stmt
procedure = assignment.right
procedure.name = assignment.target.name
n.stmts[i] = procedure
self.generic_visit(n, p)
def visit_MatchCase(self, n, p):
pattern = n.pattern
if isinstance(pattern, List):
lst = ExprList(list(pattern.items))
n.pattern = Pattern(lst, None)
self.generic_visit(n, p)
def visit_Pattern(self, n, p):
head = n.head
if not isinstance(head, ExprList):
n.head = ExprList([head])
self.generic_visit(n, p)
def visit_Assignment(self, n, p):
target = n.target
if isinstance(target, List):
target.tags.append('bracketed')
self.generic_visit(n, p)
def visit_List(self, n, p):
if isinstance(p, Iterator) or isinstance(p, IteratorChain) or "bracketed" in p.tags:
n.tags.append('bracketed')
elif "bracketed" in p.tags:
n.tags.append('bracketed')
self.generic_visit(n, p)
def _load_file(self, path):
with open(path, 'r') as f:
source = f.read()
print source
parser = Parser()
transformer = AstTransformer()
ast = parser.parse(source)
transformer.visit(ast)
return ast
def visit_Interpolation(self, n, p):
self._fill_interpolation(n)
self.generic_visit(n, p)
def _fill_interpolation(self, n):
text = n.format_string.value
parser = Parser()
format_string, expressions = self._extract_interpolations(text)
exprs = [parser.parse(expr) for expr in expressions]
n.format_string.value = format_string
n.expressions.exprs.extend(exprs)
def _extract_interpolations(self, s):
pattern = re.compile(r'\$([^\$]+?)\$')
expressions = re.findall(pattern, s)
formatted = s
for i in range(len(expressions)):
formatted = re.sub(pattern, '{' + str(i) + '}', formatted, count=1)
return formatted, expressions
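# A minimal sketch of what _extract_interpolations does (the sample string is
# illustrative, not taken from the original code):
#
#     >>> AstTransformer()._extract_interpolations('x is $x$ and $y + 1$')
#     ('x is {0} and {1}', ['x', 'y + 1'])
#
# Each $...$ group is replaced, in order, by a positional format placeholder;
# the extracted expressions are parsed and attached to the Interpolation node
# by _fill_interpolation above.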
|
Rentier/setlx2py
|
setlx2py/setlx_ast_transformer.py
|
Python
|
apache-2.0
| 2,965
|
[
"VisIt"
] |
36e1806033b26ae54793ac8519427fb26e676419eafaf8b1ce2c32a0b0b1232f
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
clean_html,
ExtractorError,
determine_ext,
)
class XVideosIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?xvideos\.com/video(?P<id>[0-9]+)(?:.*)'
_TEST = {
'url': 'http://www.xvideos.com/video4588838/biker_takes_his_girl',
'md5': '4b46ae6ea5e6e9086e714d883313c0c9',
'info_dict': {
'id': '4588838',
'ext': 'flv',
'title': 'Biker Takes his Girl',
'age_limit': 18,
}
}
_ANDROID_USER_AGENT = 'Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus Build/IMM76B) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.133 Mobile Safari/535.19'
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
mobj = re.search(r'<h1 class="inlineError">(.+?)</h1>', webpage)
if mobj:
raise ExtractorError('%s said: %s' % (self.IE_NAME, clean_html(mobj.group(1))), expected=True)
video_url = compat_urllib_parse.unquote(
self._search_regex(r'flv_url=(.+?)&', webpage, 'video URL'))
video_title = self._html_search_regex(
r'<title>(.*?)\s+-\s+XVID', webpage, 'title')
video_thumbnail = self._search_regex(
r'url_bigthumb=(.+?)&', webpage, 'thumbnail', fatal=False)
formats = [{
'url': video_url,
}]
android_req = compat_urllib_request.Request(url)
android_req.add_header('User-Agent', self._ANDROID_USER_AGENT)
android_webpage = self._download_webpage(android_req, video_id, fatal=False)
if android_webpage is not None:
player_params_str = self._search_regex(
r'mobileReplacePlayerDivTwoQual\(([^)]+)\)',
android_webpage, 'player parameters', default='')
player_params = list(map(lambda s: s.strip(' \''), player_params_str.split(',')))
if player_params:
formats.extend([{
'url': param,
'preference': -10,
} for param in player_params if determine_ext(param) == 'mp4'])
self._sort_formats(formats)
return {
'id': video_id,
'formats': formats,
'title': video_title,
'ext': 'flv',
'thumbnail': video_thumbnail,
'age_limit': 18,
}
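# A minimal usage sketch (assuming youtube-dl is installed and this extractor
# is registered; the URL is the one from _TEST above):
#
#     import youtube_dl
#     with youtube_dl.YoutubeDL({'skip_download': True}) as ydl:
#         info = ydl.extract_info(
#             'http://www.xvideos.com/video4588838/biker_takes_his_girl')
#         print(info['id'], info['title'])
#
# YoutubeDL matches the URL against _VALID_URL and returns the info dict
# built by _real_extract.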
|
apllicationCOM/youtube-dl-api-server
|
youtube_dl_server/youtube_dl/extractor/xvideos.py
|
Python
|
unlicense
| 2,573
|
[
"Galaxy"
] |
0e6920744b39276e133b147b9629cf023c43ffaed22032afcdd8480a22e35b25
|
"""
Implementation of Regression on Order Statistics for imputing left-
censored (non-detect data)
Method described in *Nondetects and Data Analysis* by Dennis R.
Helsel (John Wiley, 2005) to estimate the left-censored (non-detect)
values of a dataset.
Author: Paul M. Hobson
Company: Geosyntec Consultants (Portland, OR)
Date: 2016-06-14
"""
from __future__ import division
import warnings
import numpy
from scipy import stats
import pandas
from statsmodels.compat.pandas import sort_values
def _ros_sort(df, observations, censorship, warn=False):
"""
This function prepares a dataframe for ROS.
It sorts the data in ascending order, with
left-censored observations first. Censored observations larger than
the maximum uncensored observation are removed from the dataframe.
Parameters
----------
df : pandas.DataFrame
observations : str
Name of the column in the dataframe that contains observed
values. Censored values should be set to the detection (upper)
limit.
censorship : str
Name of the column in the dataframe that indicates whether an
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
Returns
-------
sorted_df : pandas.DataFrame
The sorted dataframe with all columns dropped except the
observation and censorship columns.
"""
# separate uncensored data from censored data
censored = sort_values(df[df[censorship]], observations, axis=0)
uncensored = sort_values(df[~df[censorship]], observations, axis=0)
if censored[observations].max() > uncensored[observations].max():
censored = censored[censored[observations] <= uncensored[observations].max()]
if warn:
msg = ("Dropping censored observations greater than "
"the max uncensored observation.")
warnings.warn(msg)
return censored.append(uncensored)[[observations, censorship]].reset_index(drop=True)
def cohn_numbers(df, observations, censorship):
r"""
Computes the Cohn numbers for the detection limits in the dataset.
The Cohn Numbers are:
- :math:`A_j =` the number of uncensored obs above the jth
threshold.
- :math:`B_j =` the number of observations (cen & uncen) below
the jth threshold.
- :math:`C_j =` the number of censored observations at the jth
threshold.
- :math:`\mathrm{PE}_j =` the probability of exceeding the jth
threshold
- :math:`\mathrm{DL}_j =` the unique, sorted detection limits
- :math:`\mathrm{DL}_{j+1} = \mathrm{DL}_j` shifted down a
single index (row)
Parameters
----------
df : pandas.DataFrame
observations : str
Name of the column in the dataframe that contains observed
values. Censored values should be set to the detection (upper)
limit.
censorship : str
Name of the column in the dataframe that indicates whether an
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
Returns
-------
cohn : pandas.DataFrame
"""
def nuncen_above(row):
""" A, the number of uncensored obs above the given threshold.
"""
# index of observations above the lower_dl DL
above = df[observations] >= row['lower_dl']
# index of observations below the upper_dl DL
below = df[observations] < row['upper_dl']
# index of uncensored (detected) observations
detect = df[censorship] == False
# return the number of observations where all conditions are True
return df[above & below & detect].shape[0]
def nobs_below(row):
""" B, the number of observations (cen & uncen) below the given
threshold
"""
# index of data less than the lower_dl DL
less_than = df[observations] < row['lower_dl']
# index of data less than or equal to the lower_dl DL
less_thanequal = df[observations] <= row['lower_dl']
# index of detects, non-detects
uncensored = df[censorship] == False
censored = df[censorship] == True
# number observations less than or equal to lower_dl DL and non-detect
LTE_censored = df[less_thanequal & censored].shape[0]
# number of observations less than lower_dl DL and detected
LT_uncensored = df[less_than & uncensored].shape[0]
# return the sum
return LTE_censored + LT_uncensored
def ncen_equal(row):
""" C, the number of censored observations at the given
threshold.
"""
censored_index = df[censorship]
censored_data = df[observations][censored_index]
censored_below = censored_data == row['lower_dl']
return censored_below.sum()
def set_upper_limit(cohn):
""" Sets the upper_dl DL for each row of the Cohn dataframe. """
if cohn.shape[0] > 1:
return cohn['lower_dl'].shift(-1).fillna(value=numpy.inf)
else:
return [numpy.inf]
def compute_PE(A, B):
""" Computes the probability of excedance for each row of the
Cohn dataframe. """
N = len(A)
PE = numpy.empty(N, dtype='float64')
PE[-1] = 0.0
for j in range(N-2, -1, -1):
PE[j] = PE[j+1] + (1 - PE[j+1]) * A[j] / (A[j] + B[j])
return PE
# unique, sorted detection limits
censored_data = df[censorship]
DLs = pandas.unique(df.loc[censored_data, observations])
DLs.sort()
# if there is an observation smaller than the minimum detection limit,
# add that value to the array
if DLs.shape[0] > 0:
if df[observations].min() < DLs.min():
DLs = numpy.hstack([df[observations].min(), DLs])
# create a dataframe
# (edited for pandas 0.14 compatibility; see commit 63f162e
# when `pipe` and `assign` are available)
cohn = pandas.DataFrame(DLs, columns=['lower_dl'])
cohn.loc[:, 'upper_dl'] = set_upper_limit(cohn)
cohn.loc[:, 'nuncen_above'] = cohn.apply(nuncen_above, axis=1)
cohn.loc[:, 'nobs_below'] = cohn.apply(nobs_below, axis=1)
cohn.loc[:, 'ncen_equal'] = cohn.apply(ncen_equal, axis=1)
cohn = cohn.reindex(range(DLs.shape[0] + 1))
cohn.loc[:, 'prob_exceedance'] = compute_PE(cohn['nuncen_above'], cohn['nobs_below'])
else:
dl_cols = ['lower_dl', 'upper_dl', 'nuncen_above',
'nobs_below', 'ncen_equal', 'prob_exceedance']
cohn = pandas.DataFrame(numpy.empty((0, len(dl_cols))), columns=dl_cols)
return cohn
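# A minimal usage sketch for cohn_numbers (column names and values below are
# illustrative assumptions, not part of the original module):
#
#     data = pandas.DataFrame({
#         'res': [2.0, 4.2, 4.6, 5.0, 5.5, 5.7, 6.0, 7.0],
#         'cen': [True, False, True, False, False, True, False, False],
#     })
#     cohn = cohn_numbers(data, observations='res', censorship='cen')
#
# The result has one row per unique detection limit (plus one padding row from
# the reindex above) and the columns lower_dl, upper_dl, nuncen_above,
# nobs_below, ncen_equal and prob_exceedance.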
def _detection_limit_index(obs, cohn):
"""
Locates the corresponding detection limit for each observation.
Basically, creates an array of indices for the detection limits
(Cohn numbers) corresponding to each data point.
Parameters
----------
obs : float
A single observation from the larger dataset.
cohn : pandas.DataFrame
Dataframe of Cohn numbers.
Returns
-------
det_limit_index : int
The index of the corresponding detection limit in `cohn`
See also
--------
cohn_numbers
"""
if cohn.shape[0] > 0:
index, = numpy.where(cohn['lower_dl'] <= obs)
det_limit_index = index[-1]
else:
det_limit_index = 0
return det_limit_index
def _ros_group_rank(df, dl_idx, censorship):
"""
Ranks each observation within the data groups.
In this case, the groups are defined by the record's detection
limit index and censorship status.
Parameters
----------
df : pandas.DataFrame
dl_idx : str
Name of the column in the dataframe the index of the
observations' corresponding detection limit in the `cohn`
dataframe.
censorship : str
Name of the column in the dataframe that indicates whether an
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
Returns
-------
ranks : numpy.array
Array of ranks for the dataset.
"""
# (edited for pandas 0.14 compatibility; see commit 63f162e
# when `pipe` and `assign` are available)
ranks = df.copy()
ranks.loc[:, 'rank'] = 1
ranks = (
ranks.groupby(by=[dl_idx, censorship])['rank']
.transform(lambda g: g.cumsum())
)
return ranks
def _ros_plot_pos(row, censorship, cohn):
"""
ROS-specific plotting positions.
Computes the plotting position for an observation based on its rank,
censorship status, and detection limit index.
Parameters
----------
row : pandas.Series or dict-like
Full observation (row) from a censored dataset. Requires a
'rank', 'detection_limit', and `censorship` column.
censorship : str
Name of the column in the dataframe that indicates whether an
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
cohn : pandas.DataFrame
Dataframe of Cohn numbers.
Returns
-------
plotting_position : float
See also
--------
cohn_numbers
"""
DL_index = row['det_limit_index']
rank = row['rank']
censored = row[censorship]
dl_1 = cohn.iloc[DL_index]
dl_2 = cohn.iloc[DL_index + 1]
if censored:
return (1 - dl_1['prob_exceedance']) * rank / (dl_1['ncen_equal']+1)
else:
return (1 - dl_1['prob_exceedance']) + (dl_1['prob_exceedance'] - dl_2['prob_exceedance']) * \
rank / (dl_1['nuncen_above']+1)
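# Restating the two branches above in the notation of the cohn_numbers
# docstring (a paraphrase of the code, not additional logic):
#   censored:   pp = (1 - PE_j) * rank / (C_j + 1)
#   uncensored: pp = (1 - PE_j) + (PE_j - PE_{j+1}) * rank / (A_j + 1)
# where PE_j, C_j and A_j are prob_exceedance, ncen_equal and nuncen_above for
# the observation's detection limit (dl_1), and PE_{j+1} comes from the next
# limit (dl_2).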
def _norm_plot_pos(observations):
"""
Computes standard normal (Gaussian) plotting positions using scipy.
Parameters
----------
observations : array-like
Sequence of observed quantities.
Returns
-------
plotting_position : array of floats
"""
ppos, sorted_res = stats.probplot(observations, fit=False)
return stats.norm.cdf(ppos)
def plotting_positions(df, censorship, cohn):
"""
Compute the plotting positions for the observations.
The ROS-specific plotting positions are based on the observations'
rank, censorship status, and corresponding detection limit.
Parameters
----------
df : pandas.DataFrame
censorship : str
Name of the column in the dataframe that indicates whether an
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
cohn : pandas.DataFrame
Dataframe of Cohn numbers.
Returns
-------
plotting_position : array of float
See also
--------
cohn_numbers
"""
plot_pos = df.apply(lambda r: _ros_plot_pos(r, censorship, cohn), axis=1)
# correctly sort the plotting positions of the ND data:
ND_plotpos = plot_pos[df[censorship]]
ND_plotpos.values.sort()
plot_pos[df[censorship]] = ND_plotpos
return plot_pos
def _impute(df, observations, censorship, transform_in, transform_out):
"""
Executes the basic regression on order statistics (ROS) procedure.
Uses ROS to impute the censored values from the best-fit line of a
probability plot of the uncensored values.
Parameters
----------
df : pandas.DataFrame
observations : str
Name of the column in the dataframe that contains observed
values. Censored values should be set to the detection (upper)
limit.
censorship : str
Name of the column in the dataframe that indicates whether an
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
transform_in, transform_out : callable
Transformations to be applied to the data prior to fitting
the line and after estimated values from that line. Typically,
`numpy.log` and `numpy.exp` are used, respectively.
Returns
-------
estimated : pandas.DataFrame
A new dataframe with two new columns: "estimated" and "final".
The "estimated" column contains of the values inferred from the
best-fit line. The "final" column contains the estimated values
only where the original observations were censored, and the original
observations everywhere else.
"""
# detect/non-detect selectors
uncensored_mask = df[censorship] == False
censored_mask = df[censorship] == True
# fit a line to the logs of the detected data
fit_params = stats.linregress(
df['Zprelim'][uncensored_mask],
transform_in(df[observations][uncensored_mask])
)
# pull out the slope and intercept for use later
slope, intercept = fit_params[:2]
# model the data based on the best-fit curve
# (edited for pandas 0.14 compatibility; see commit 63f162e
# when `pipe` and `assign` are available)
df.loc[:, 'estimated'] = transform_out(slope * df['Zprelim'][censored_mask] + intercept)
df.loc[:, 'final'] = numpy.where(df[censorship], df['estimated'], df[observations])
return df
def _do_ros(df, observations, censorship, transform_in, transform_out):
"""
Dataframe-centric function to impute censored values with ROS.
Prepares a dataframe for, and then estimates the values of, a censored
dataset using Regression on Order Statistics.
Parameters
----------
df : pandas.DataFrame
observations : str
Name of the column in the dataframe that contains observed
values. Censored values should be set to the detection (upper)
limit.
censorship : str
Name of the column in the dataframe that indicates whether an
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
transform_in, transform_out : callable
Transformations to be applied to the data prior to fitting
the line and after estimated values from that line. Typically,
`numpy.log` and `numpy.exp` are used, respectively.
Returns
-------
estimated : pandas.DataFrame
A new dataframe with two new columns: "estimated" and "final".
The "estimated" column contains of the values inferred from the
best-fit line. The "final" column contains the estimated values
only where the original observations were censored, and the original
observations everywhere else.
"""
# compute the Cohn numbers
cohn = cohn_numbers(df, observations=observations, censorship=censorship)
# (edited for pandas 0.14 compatibility; see commit 63f162e
# when `pipe` and `assign` are available)
modeled = _ros_sort(df, observations=observations, censorship=censorship)
modeled.loc[:, 'det_limit_index'] = modeled[observations].apply(_detection_limit_index, args=(cohn,))
modeled.loc[:, 'rank'] = _ros_group_rank(modeled, 'det_limit_index', censorship)
modeled.loc[:, 'plot_pos'] = plotting_positions(modeled, censorship, cohn)
modeled.loc[:, 'Zprelim'] = stats.norm.ppf(modeled['plot_pos'])
return _impute(modeled, observations, censorship, transform_in, transform_out)
def impute_ros(observations, censorship, df=None, min_uncensored=2,
max_fraction_censored=0.8, substitution_fraction=0.5,
transform_in=numpy.log, transform_out=numpy.exp,
as_array=True):
"""
Impute censored dataset using Regression on Order Statistics (ROS).
Method described in *Nondetects and Data Analysis* by Dennis R.
Helsel (John Wiley, 2005) to estimate the left-censored (non-detect)
values of a dataset. When there is insufficient non-censored data,
simple substitution is used.
Parameters
----------
observations : str or array-like
Label of the column or the float array of censored observations
censorship : str
Label of the column or the bool array of the censorship
status of the observations.
* True if censored,
* False if uncensored
df : pandas.DataFrame, optional
If `observations` and `censorship` are labels, this is the
DataFrame that contains those columns.
min_uncensored : int (default is 2)
The minimum number of uncensored values required before ROS
can be used to impute the censored observations. When this
criterion is not met, simple substitution is used instead.
max_fraction_censored : float (default is 0.8)
The maximum fraction of censored data below which ROS can be
used to impute the censored observations. When this fraction is
exceeded, simple substitution is used instead.
substitution_fraction : float (default is 0.5)
The fraction of the detection limit to be used during simple
substitution of the censored values.
transform_in : callable (default is numpy.log)
Transformation to be applied to the values prior to fitting a
line to the plotting positions vs. uncensored values.
transform_out : callable (default is numpy.exp)
Transformation to be applied to the imputed censored values
estimated from the previously computed best-fit line.
as_array : bool (default is True)
When True, a numpy array of the imputed observations is
returned. Otherwise, a modified copy of the original dataframe
with all of the intermediate calculations is returned.
Returns
-------
imputed : numpy.array (default) or pandas.DataFrame
The final observations where the censored values have either been
imputed through ROS or substituted as a fraction of the
detection limit.
Notes
-----
This function requires pandas 0.14 or more recent.
"""
# process arrays into a dataframe, if necessary
if df is None:
df = pandas.DataFrame({'obs': observations, 'cen': censorship})
observations = 'obs'
censorship = 'cen'
# basic counts/metrics of the dataset
N_observations = df.shape[0]
N_censored = df[censorship].astype(int).sum()
N_uncensored = N_observations - N_censored
fraction_censored = N_censored / N_observations
# add plotting positions if there are no censored values
# (edited for pandas 0.14 compatibility; see commit 63f162e
# when `pipe` and `assign` are available)
if N_censored == 0:
output = df[[observations, censorship]].copy()
output.loc[:, 'final'] = df[observations]
# substitute w/ fraction of the DLs if there's insufficient
# uncensored data
# (edited for pandas 0.14 compatibility; see commit 63f162e
# when `pipe` and `assign` are available)
elif (N_uncensored < min_uncensored) or (fraction_censored > max_fraction_censored):
output = df[[observations, censorship]].copy()
output.loc[:, 'final'] = df[observations]
output.loc[df[censorship], 'final'] *= substitution_fraction
# normal ROS stuff
else:
output = _do_ros(df, observations, censorship, transform_in, transform_out)
# convert to an array if necessary
if as_array:
output = output['final'].values
return output
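# A minimal usage sketch for impute_ros with plain arrays (the sample values
# are illustrative assumptions, not from the original module):
#
#     obs = [2.0, 4.2, 4.6, 5.0, 5.5, 5.7, 6.0, 7.0]
#     cen = [True, False, True, False, False, True, False, False]
#     imputed = impute_ros(obs, cen)   # numpy array of length 8 by default
#
# Censored entries are replaced by values predicted from the best-fit line of
# the normal probability plot of the uncensored observations; pass
# as_array=False to get the full dataframe of intermediate results instead.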
|
bert9bert/statsmodels
|
statsmodels/imputation/ros.py
|
Python
|
bsd-3-clause
| 19,099
|
[
"Gaussian"
] |
9dbc4838cd50db76a74c8acff5880141fe3895a30743089ef0f4bba16d26e6a2
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkGeometryFilter(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkGeometryFilter(), 'Processing.',
('vtkDataSet',), ('vtkPolyData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
chrisidefix/devide
|
modules/vtk_basic/vtkGeometryFilter.py
|
Python
|
bsd-3-clause
| 488
|
[
"VTK"
] |
383b6d7453c7921c38eccb7e4d27f785b63d714e6a43ffac5bd75ce0c2f3779a
|
"""
Instructor Dashboard Views
"""
import logging
import datetime
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
import uuid
import pytz
import mimetypes
import json
import pdb
import os
import hashlib
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext as _, ugettext_noop
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from edxmako.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django.http import Http404, HttpResponseServerError, HttpResponseRedirect
from django.conf import settings
from util.json_request import JsonResponse
from mock import patch
from lms.djangoapps.lms_xblock.runtime import quote_slashes
from openedx.core.lib.xblock_utils import wrap_xblock
from xmodule.html_module import HtmlDescriptor
from xmodule.modulestore.django import modulestore
from xmodule.tabs import CourseTab
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from courseware.access import has_access
from courseware.courses import get_course_by_id, get_studio_url
from django_comment_client.utils import has_forum_access
from django_comment_common.models import FORUM_ROLE_ADMINISTRATOR
from student.models import CourseEnrollment
from shoppingcart.models import Coupon, PaidCourseRegistration, CourseRegCodeItem
from course_modes.models import CourseMode, CourseModesArchive
from student.roles import CourseFinanceAdminRole, CourseSalesAdminRole
from certificates.models import (
CertificateGenerationConfiguration,
CertificateWhitelist,
GeneratedCertificate,
CertificateStatuses,
CertificateGenerationHistory,
CertificateInvalidation,
)
from certificates import api as certs_api
from util.date_utils import get_default_time_display
from class_dashboard.dashboard_data import get_section_display_name, get_array_section_has_problem
from .tools import get_units_with_due_date, title_or_url, bulk_email_is_enabled_for_course
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from .tools import get_student_from_identifier
from student.forms import ClassSetForm, CompetitionSubmissionForm
from django.forms.models import model_to_dict
from student.helpers import is_teacher, get_my_classes, get_class_size
from student.models import ClassSet, CompetitionSubmission
from instructor.utils import get_class_codes_of_teacher, get_last_submission_summary
from django.contrib.admin.views.decorators import staff_member_required
from django.views.decorators.clickjacking import xframe_options_exempt
from django.contrib.auth.models import User
from student.helpers import get_my_classes, get_class_size, is_teacher, is_student, get_student_class_info
from instructor.views.api import _get_assignment_names as get_assignment_names
from django.core.files.storage import default_storage
from django.core.files import File
log = logging.getLogger(__name__)
#NEW: TeacherDashboardTab
class TeacherDashboardTab(CourseTab):
"""
Defines the Teacher Dashboard view type that is shown as a course tab.
"""
type = "teacher"
title = ugettext_noop('Teacher Dashboard')
view_name = "teacher_dashboard"
is_dynamic = True # The "Teacher Dashboard" tab is instead dynamically added when it is enabled
@classmethod
def is_enabled(cls, course, user=None):
"""
Returns true if the specified user has teacher access.
"""
return bool(user and has_access(user, 'teacher', course.id))
class InstructorDashboardTab(CourseTab):
"""
Defines the Instructor Dashboard view type that is shown as a course tab.
"""
type = "instructor"
title = ugettext_noop('Instructor')
view_name = "instructor_dashboard"
is_dynamic = True # The "Instructor" tab is instead dynamically added when it is enabled
@classmethod
def is_enabled(cls, course, user=None):
"""
Returns true if the specified user has staff access.
"""
return bool(user and has_access(user, 'staff', course, course.id))
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def teacher_dashboard(request, course_id):
""" Display the teacher dashboard for a course. """
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
log.error(u"Unable to find course with course key %s while loading the Teacher Dashboard.", course_id)
return HttpResponseServerError()
course = get_course_by_id(course_key, depth=0)
access = {
'admin': request.user.is_staff,
'instructor': bool(has_access(request.user, 'instructor', course)),
'finance_admin': CourseFinanceAdminRole(course_key).has_user(request.user),
'sales_admin': CourseSalesAdminRole(course_key).has_user(request.user),
'staff': bool(has_access(request.user, 'staff', course)),
'forum_admin': has_forum_access(request.user, course_key, FORUM_ROLE_ADMINISTRATOR),
'teacher': bool(has_access(request.user, 'teacher', course_key)),
}
if not access['teacher']:
raise Http404("You do not have access to this page")
# This should never need to be raised, since assigning teacher access should always check for this first.
#if not is_teacher(request.user):
# raise Http404("You have the wrong user type to access this page. Please contact the site administrator.")
is_white_label = CourseMode.is_white_label(course_key)
class_form_dict = {}
class_set_form = ClassSetForm()
competition_form = CompetitionSubmissionForm(initial={'contact_email':request.user.email,'contact_name':" ".join([request.user.first_name,request.user.last_name]),'contact_ph':request.user.teacherprofile.phone})
competition_dict = {'competition_submission_form':competition_form}
if request.method == 'POST':
if request.POST.get("formtype") == "competition":
competition_dict.update(_competition_submission_form_handler(request,course_id))
else:
class_set_form = ClassSetForm(request.POST.copy())
if class_set_form.is_valid():
_create_new_class(class_set_form, course_key, request.user)
class_form_dict.update( {'success': 'Class successfully added!'})
return HttpResponseRedirect(reverse('teacher_dashboard',kwargs={'course_id':unicode(course_key)})+'#view-my_classes')
#class_set_form = ClassSetForm() # refresh to blank form
else:
pass # errors will be updated
new_class_dict = _section_my_classes(course,access,request.user)
new_class_dict.update({'class_set_form': class_set_form})
class_code_list = get_class_codes_of_teacher(request.user,course_key)
submission_history = []
for c in class_code_list:
s = get_last_submission_summary(c['class_code'])
if s:
submission_history.append(s)
# arrange competition section, and update with any POST results
competition_section = _section_competition_submission(course,access,class_code_list)
competition_section.update(competition_dict)
competition_section.update({'submission_history': submission_history})
# competition success redirection
if request.method =='POST' and competition_section['success'] == True:
return HttpResponseRedirect(reverse('teacher_dashboard',kwargs={'course_id':unicode(course_key)})+'?success=true#view-competition_submission')
# after competition success redirection
if request.method == 'GET' and request.GET.get('success',None)=="true":
competition_section.update({'success': True})
sections = [
#_section_course_info(course, access),
new_class_dict,
_section_my_students(course, access, is_white_label, request.user, class_code_list),
_section_grade_centre(course, access,class_code_list),
competition_section,
]
context = {
'course': course,
'sections': sections,
}
return render_to_response('instructor/teacher_dashboard/teacher_dashboard.html', context)
def _competition_submission_form_handler(request,course_id):
"""
Handles validating the competition submission form.
Validation Rules
- Basic Form Rules
- Either video or URL must be uploaded
- All file sizes must be under 20MB
- If all rules are satisfied, calls function _upload_competition_submission
to handle the file storage and create a CompetitionSubmission model
Returns: A dictionary of fields to be updated to the section data by the parent view.
"""
competition_dict = {}
competition_dict['video_url_errors']=""
competition_form= CompetitionSubmissionForm(request.POST.copy())
competition_dict['success'] = False # presumes unsuccessful
if competition_form.is_valid():
log.warning("Form is valid")
if not (request.FILES.get('video_upload',False) or request.POST.get('video_url',False)):
competition_dict.update({'video_url_errors':"You require at least a File Upload or URL."})
else:
# upload and process the submission fields and files
success = _upload_competition_submission(request,course_id,competition_form)
competition_dict.update(success)
else:
log.warning("Form is invalid")
if not competition_dict['success']:
competition_dict['competition_submission_form'] = competition_form #assume for now that the form will need to be returned this way, unless there is a success
return competition_dict
def _upload_competition_submission(request,course_id,competition_form):
"""
Handles the entry of a new submission
"""
try:
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
class_code = request.POST.get('class_code')
class_set = ClassSet.objects.get(class_code=class_code)
video_entry = {}
video_url = request.POST.get('video_url',None)
video_file = request.FILES.get('video_upload',None)
media_entry = {}
media_release_files = request.FILES.getlist('media_release_upload')
src_files = request.FILES.getlist('src_upload')
src_entry = {}
# Create entry (without committing to writing to the db yet)
entry = CompetitionSubmission(
device_name=competition_form.cleaned_data['device_name'],
device_description = competition_form.cleaned_data['device_description'],
contact_name = competition_form.cleaned_data['contact_name'],
contact_ph = competition_form.cleaned_data['contact_ph'],
contact_email = competition_form.cleaned_data['contact_email'],
acknowledge_toc = competition_form.cleaned_data['acknowledge_toc'],
attempt_number = CompetitionSubmission.objects.filter(class_set__class_code=class_code).count()+1,
class_set = class_set,
school = class_set.school,
)
# first find out how many submissions this class has made
# then prepare the JSON payload fields for each upload type
# if there is an easy video URL (sweet no problems)
###
# Video Upload
###
if video_url:
video_entry['url'] = request.POST.get('video_url')
if video_file:
date = datetime.date.today().isoformat()
sha1 = _comp_get_sha1(video_file.name,unicode(course_key),class_code,date)
path = _comp_file_storage_path(sha1,video_file.name,course_key,class_code,entry.attempt_number,'video')
video_entry['video_file_sha1'] = sha1
video_entry['video_file_mimetype'] = mimetypes.guess_type(video_file.name)[0]
video_entry['video_file_name'] = video_file.name
if not default_storage.exists(path):
default_storage.save(path,File(video_file.file))
entry.video_entry = json.dumps(video_entry)
###
# Media Release
###
if len(media_release_files)>0:
media_entry['media_file_names'] = []
media_entry['media_file_sha1s'] = []
media_entry['media_file_mimetypes'] = []
for f in media_release_files:
date = datetime.date.today().isoformat()
sha1 = _comp_get_sha1(f.name,unicode(course_key),class_code,date)
path = _comp_file_storage_path(sha1,f.name,course_key,class_code,entry.attempt_number,'media')
media_entry['media_file_sha1s'].append(sha1)
media_entry['media_file_mimetypes'].append(mimetypes.guess_type(f.name)[0])
media_entry['media_file_names'].append(f.name)
if not default_storage.exists(path):
default_storage.save(path,File(f.file))
entry.media_release = json.dumps(media_entry)
###
# Src Upload
###
if len(src_files)>0:
src_entry['src_file_names'] = []
src_entry['src_file_sha1s'] = []
src_entry['src_file_mimetypes'] = []
for f in src_files:
date = datetime.date.today().isoformat()
sha1 = _comp_get_sha1(f.name,unicode(course_key),class_code,date)
path = _comp_file_storage_path(sha1,f.name,course_key,class_code,entry.attempt_number,'src')
src_entry['src_file_sha1s'].append(sha1)
src_entry['src_file_mimetypes'].append(mimetypes.guess_type(f.name)[0])
src_entry['src_file_names'].append(f.name)
if not default_storage.exists(path):
default_storage.save(path,File(f.file))
entry.src_code_entry = json.dumps(src_entry)
entry.save()
log.warning("Saved model")
return {'success': True}
except Exception:
return {'success': False, 'error_msg': True }
def _comp_file_storage_path(sha1, filename,course_key,class_code,attempt,sub_type):
path = (
'competition_submissions/{course_key.org}/{course_key.course}/{class_code}/'
'{attempt}/{sub_type}/{sha1}{ext}'.format(
course_key=course_key,
sha1=sha1,
class_code=class_code,
sub_type=sub_type,
attempt=attempt,
ext=os.path.splitext(filename)[1]
)
)
return path
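# Illustrative example of the resulting path (all values hypothetical):
#   competition_submissions/OrgX/Phys101/CLASS01/1/video/<sha1-hex>.mp4
# Files are grouped by course org/number, class code, attempt number and
# submission type, and named by the SHA-1 digest from _comp_get_sha1 (computed
# over the file name, course id, class code and date) plus the original
# file extension.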
def _comp_get_sha1(filename,course_id,class_code,date):
sha1 = hashlib.sha1()
sha1.update(course_id)
sha1.update(class_code)
sha1.update(date)
return sha1.hexdigest()
def _create_new_class(form, course_key, user):
""" Create a new class in the database """
class_set = ClassSet(
created_by=user,
teacher=user,
short_name=form.cleaned_data['short_name'],
class_name=form.cleaned_data['class_name'],
school=user.teacherprofile.school,
course_id=course_key,
grade=form.cleaned_data['grade'],
subject=form.cleaned_data['subject'],
assessment=form.cleaned_data['assessment'],
no_of_students=form.cleaned_data['no_of_students'])
try:
class_set.save()
except Exception:
log.error("Error creating class set for %s"%user)
raise
def _section_my_students_old(course,access, is_white_label,user,class_code_list=None):
course_key = course.id
section_data = {
'section_key': 'my_students',
'section_display_name':_('My Students'),
'access': access,
'course_id': course_key,
'ccx_is_enabled': False,
'num_sections': len(course.children),
'is_white_label': is_white_label,
'list_students_of_class_code_url': reverse('list_students_of_class_code',kwargs={'course_id': unicode(course_key)}),
'list_course_role_members_url': reverse('list_course_role_members', kwargs={'course_id': unicode(course_key)}),
'update_forum_role_membership_url': reverse('update_forum_role_membership', kwargs={'course_id': unicode(course_key)}),
'class_code_list': class_code_list,
'enroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'unenroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'upload_student_csv_button_url': reverse('register_and_enroll_students', kwargs={'course_id': unicode(course_key)}),
'modify_beta_testers_button_url': reverse('bulk_beta_modify_access', kwargs={'course_id': unicode(course_key)}),
'modify_access_url': reverse('modify_access', kwargs={'course_id': unicode(course_key)}),
'list_forum_members_url': reverse('list_forum_members', kwargs={'course_id': unicode(course_key)}),
}
return section_data
# MM NEW: Inherited from membership and modified to return user specific fields.
def _section_my_classes(course, access, user):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'my_classes',
'section_display_name': _('My Classes'),
'access': access,
'course_id': course_key,
'num_sections': len(course.children),
'class_set_form': ClassSetForm(),
}
# get classes (a list of dictionaries)
classes = get_my_classes(user,course_key)
classes_info = []
for c in classes:
cdict = {}
cdict['total_accounts']=get_class_size(c)
cdict['active_accounts']=get_class_size(c,True)
cdict.update(model_to_dict(c,fields=['short_name','class_name','class_code','grade','no_of_students' ,'assessment']))
cdict['school_name'] = c.school.__unicode__()
subjects = dict(ClassSetForm.SUBJECTS)
cdict['subject'] = subjects[c.subject]
classes_info.append(cdict)
section_data['my_classes'] = classes_info
return section_data
#def _section_students(course, access, user):
# """ List the students and link to their profile page. """
# course_key = course.id
#
# section_data = {
# 'section_key': 'students',
# 'section_display_name': _('Students'),
# 'access': access,
# 'course_id': course_key,
# 'num_sections': len(course.children),
# }
#
# return section_data
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def instructor_dashboard_2(request, course_id):
""" Display the instructor dashboard for a course. """
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
log.error(u"Unable to find course with course key %s while loading the Instructor Dashboard.", course_id)
return HttpResponseServerError()
course = get_course_by_id(course_key, depth=0)
access = {
'admin': request.user.is_staff,
'instructor': bool(has_access(request.user, 'instructor', course)),
'finance_admin': CourseFinanceAdminRole(course_key).has_user(request.user),
'sales_admin': CourseSalesAdminRole(course_key).has_user(request.user),
'staff': bool(has_access(request.user, 'staff', course)),
'forum_admin': has_forum_access(request.user, course_key, FORUM_ROLE_ADMINISTRATOR),
}
if not access['staff']:
raise Http404()
is_white_label = CourseMode.is_white_label(course_key)
sections = [
_section_course_info(course, access),
_section_membership(course, access, is_white_label),
_section_cohort_management(course, access),
_section_student_admin(course, access),
_section_data_download(course, access),
]
analytics_dashboard_message = None
if settings.ANALYTICS_DASHBOARD_URL:
# Construct a URL to the external analytics dashboard
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link_start = "<a href=\"{}\" target=\"_blank\">".format(analytics_dashboard_url)
analytics_dashboard_message = _(
"To gain insights into student enrollment and participation {link_start}"
"visit {analytics_dashboard_name}, our new course analytics product{link_end}."
)
analytics_dashboard_message = analytics_dashboard_message.format(
link_start=link_start, link_end="</a>", analytics_dashboard_name=settings.ANALYTICS_DASHBOARD_NAME)
# Temporarily show the "Analytics" section until we have a better way of linking to Insights
sections.append(_section_analytics(course, access))
# Check if there is corresponding entry in the CourseMode Table related to the Instructor Dashboard course
course_mode_has_price = False
paid_modes = CourseMode.paid_modes_for_course(course_key)
if len(paid_modes) == 1:
course_mode_has_price = True
elif len(paid_modes) > 1:
log.error(
u"Course %s has %s course modes with payment options. Course must only have "
u"one paid course mode to enable eCommerce options.",
unicode(course_key), len(paid_modes)
)
if settings.FEATURES.get('INDIVIDUAL_DUE_DATES') and access['instructor']:
sections.insert(3, _section_extensions(course))
# Gate access to course email by feature flag & by course-specific authorization
if bulk_email_is_enabled_for_course(course_key):
sections.append(_section_send_email(course, access))
# Gate access to Metrics tab by feature flag and staff authorization
if settings.FEATURES['CLASS_DASHBOARD'] and access['staff']:
sections.append(_section_metrics(course, access))
# Gate access to Ecommerce tab
if course_mode_has_price and (access['finance_admin'] or access['sales_admin']):
sections.append(_section_e_commerce(course, access, paid_modes[0], is_white_label, is_white_label))
# Gate access to Special Exam tab depending if either timed exams or proctored exams
# are enabled in the course
# NOTE: For now, if we only have proctored exams enabled, then only platform Staff
# (user.is_staff) will be able to view the special exams tab. This may
# change in the future
can_see_special_exams = (
((course.enable_proctored_exams and request.user.is_staff) or course.enable_timed_exams) and
settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False)
)
if can_see_special_exams:
sections.append(_section_special_exams(course, access))
# Certificates panel
# This is used to generate example certificates
# and enable self-generated certificates for a course.
certs_enabled = CertificateGenerationConfiguration.current().enabled
if certs_enabled and access['admin']:
sections.append(_section_certificates(course))
disable_buttons = not _is_small_course(course_key)
certificate_white_list = CertificateWhitelist.get_certificate_white_list(course_key)
generate_certificate_exceptions_url = reverse( # pylint: disable=invalid-name
'generate_certificate_exceptions',
kwargs={'course_id': unicode(course_key), 'generate_for': ''}
)
generate_bulk_certificate_exceptions_url = reverse( # pylint: disable=invalid-name
'generate_bulk_certificate_exceptions',
kwargs={'course_id': unicode(course_key)}
)
certificate_exception_view_url = reverse(
'certificate_exception_view',
kwargs={'course_id': unicode(course_key)}
)
certificate_invalidation_view_url = reverse( # pylint: disable=invalid-name
'certificate_invalidation_view',
kwargs={'course_id': unicode(course_key)}
)
certificate_invalidations = CertificateInvalidation.get_certificate_invalidations(course_key)
context = {
'course': course,
'studio_url': get_studio_url(course, 'course'),
'sections': sections,
'disable_buttons': disable_buttons,
'analytics_dashboard_message': analytics_dashboard_message,
'certificate_white_list': certificate_white_list,
'certificate_invalidations': certificate_invalidations,
'generate_certificate_exceptions_url': generate_certificate_exceptions_url,
'generate_bulk_certificate_exceptions_url': generate_bulk_certificate_exceptions_url,
'certificate_exception_view_url': certificate_exception_view_url,
'certificate_invalidation_view_url': certificate_invalidation_view_url,
}
return render_to_response('instructor/instructor_dashboard_2/instructor_dashboard_2.html', context)
## Section functions starting with _section return a dictionary of section data.
## The dictionary must include at least {
## 'section_key': 'circus_expo'
## 'section_display_name': 'Circus Expo'
## }
## section_key will be used as a css attribute, javascript tie-in, and template import filename.
## section_display_name will be used to generate link titles in the nav bar.
def _section_e_commerce(course, access, paid_mode, coupons_enabled, reports_enabled):
""" Provide data for the corresponding dashboard section """
course_key = course.id
coupons = Coupon.objects.filter(course_id=course_key).order_by('-is_active')
course_price = paid_mode.min_price
total_amount = None
if access['finance_admin']:
single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_key)
bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_key)
total_amount = single_purchase_total + bulk_purchase_total
section_data = {
'section_key': 'e-commerce',
'section_display_name': _('E-Commerce'),
'access': access,
'course_id': unicode(course_key),
'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
'ajax_remove_coupon_url': reverse('remove_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_get_coupon_info': reverse('get_coupon_info', kwargs={'course_id': unicode(course_key)}),
'get_user_invoice_preference_url': reverse('get_user_invoice_preference', kwargs={'course_id': unicode(course_key)}),
'sale_validation_url': reverse('sale_validation', kwargs={'course_id': unicode(course_key)}),
'ajax_update_coupon': reverse('update_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_add_coupon': reverse('add_coupon', kwargs={'course_id': unicode(course_key)}),
'get_sale_records_url': reverse('get_sale_records', kwargs={'course_id': unicode(course_key)}),
'get_sale_order_records_url': reverse('get_sale_order_records', kwargs={'course_id': unicode(course_key)}),
'instructor_url': reverse('instructor_dashboard', kwargs={'course_id': unicode(course_key)}),
'get_registration_code_csv_url': reverse('get_registration_codes', kwargs={'course_id': unicode(course_key)}),
'generate_registration_code_csv_url': reverse('generate_registration_codes', kwargs={'course_id': unicode(course_key)}),
'active_registration_code_csv_url': reverse('active_registration_codes', kwargs={'course_id': unicode(course_key)}),
'spent_registration_code_csv_url': reverse('spent_registration_codes', kwargs={'course_id': unicode(course_key)}),
'set_course_mode_url': reverse('set_course_mode_price', kwargs={'course_id': unicode(course_key)}),
'download_coupon_codes_url': reverse('get_coupon_codes', kwargs={'course_id': unicode(course_key)}),
'enrollment_report_url': reverse('get_enrollment_report', kwargs={'course_id': unicode(course_key)}),
'exec_summary_report_url': reverse('get_exec_summary_report', kwargs={'course_id': unicode(course_key)}),
'list_financial_report_downloads_url': reverse('list_financial_report_downloads',
kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'look_up_registration_code': reverse('look_up_registration_code', kwargs={'course_id': unicode(course_key)}),
'coupons': coupons,
'sales_admin': access['sales_admin'],
'coupons_enabled': coupons_enabled,
'reports_enabled': reports_enabled,
'course_price': course_price,
'total_amount': total_amount
}
return section_data
def _section_special_exams(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'special_exams',
'section_display_name': _('Special Exams'),
'access': access,
'course_id': unicode(course_key)
}
return section_data
def _section_certificates(course):
"""Section information for the certificates panel.
The certificates panel allows global staff to generate
example certificates and enable self-generated certificates
for a course.
Arguments:
course (Course)
Returns:
dict
"""
example_cert_status = None
html_cert_enabled = certs_api.has_html_certificates_enabled(course.id, course)
if html_cert_enabled:
can_enable_for_course = True
else:
example_cert_status = certs_api.example_certificates_status(course.id)
# Allow the user to enable self-generated certificates for students
# *only* once a set of example certificates has been successfully generated.
# If certificates have been misconfigured for the course (for example, if
# the PDF template hasn't been uploaded yet), then we don't want
# to turn on self-generated certificates for students!
can_enable_for_course = (
example_cert_status is not None and
all(
cert_status['status'] == 'success'
for cert_status in example_cert_status
)
)
instructor_generation_enabled = settings.FEATURES.get('CERTIFICATES_INSTRUCTOR_GENERATION', False)
certificate_statuses_with_count = {
certificate['status']: certificate['count']
for certificate in GeneratedCertificate.get_unique_statuses(course_key=course.id)
}
return {
'section_key': 'certificates',
'section_display_name': _('Certificates'),
'example_certificate_status': example_cert_status,
'can_enable_for_course': can_enable_for_course,
'enabled_for_course': certs_api.cert_generation_enabled(course.id),
'instructor_generation_enabled': instructor_generation_enabled,
'html_cert_enabled': html_cert_enabled,
'active_certificate': certs_api.get_active_web_certificate(course),
'certificate_statuses_with_count': certificate_statuses_with_count,
'status': CertificateStatuses,
'certificate_generation_history':
CertificateGenerationHistory.objects.filter(course_id=course.id).order_by("-created"),
'urls': {
'generate_example_certificates': reverse(
'generate_example_certificates',
kwargs={'course_id': course.id}
),
'enable_certificate_generation': reverse(
'enable_certificate_generation',
kwargs={'course_id': course.id}
),
'start_certificate_generation': reverse(
'start_certificate_generation',
kwargs={'course_id': course.id}
),
'start_certificate_regeneration': reverse(
'start_certificate_regeneration',
kwargs={'course_id': course.id}
),
'list_instructor_tasks_url': reverse(
'list_instructor_tasks',
kwargs={'course_id': course.id}
),
}
}
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_POST
@login_required
def set_course_mode_price(request, course_id):
"""
set the new course price and add new entry in the CourseModesArchive Table
"""
try:
course_price = int(request.POST['course_price'])
except ValueError:
return JsonResponse(
{'message': _("Please Enter the numeric value for the course price")},
status=400) # status code 400: Bad Request
currency = request.POST['currency']
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course_honor_mode = CourseMode.objects.filter(mode_slug='honor', course_id=course_key)
if not course_honor_mode:
return JsonResponse(
{'message': _("CourseMode with the mode slug({mode_slug}) DoesNotExist").format(mode_slug='honor')},
status=400) # status code 400: Bad Request
CourseModesArchive.objects.create(
course_id=course_id, mode_slug='honor', mode_display_name='Honor Code Certificate',
min_price=course_honor_mode[0].min_price, currency=course_honor_mode[0].currency,
expiration_datetime=datetime.datetime.now(pytz.utc), expiration_date=datetime.date.today()
)
course_honor_mode.update(
min_price=course_price,
currency=currency
)
return JsonResponse({'message': _("CourseMode price updated successfully")})
def _section_my_students(course, access, is_white_label, user, class_code_list=None):
""" Provide data for the corresponding dashboard section """
course_key = course.id
ccx_enabled = settings.FEATURES.get('CUSTOM_COURSES_EDX', False) and course.enable_ccx
section_data = {
'section_key': 'membership',
'section_display_name': _('My Students'),
'access': access,
'ccx_is_enabled': ccx_enabled,
'class_code_list': class_code_list,
'is_white_label': is_white_label,
'list_students_of_class_code_url': reverse('list_students_of_class_code',kwargs={'course_id': unicode(course_key)}),
'modify_students_of_class_code_url': reverse('modify_students_of_class_code',kwargs={'course_id': unicode(course_key)}),#TODO: ADD URLS IN
'enroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'unenroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'upload_student_csv_button_url': reverse('register_and_enroll_students', kwargs={'course_id': unicode(course_key)}),
'modify_beta_testers_button_url': reverse('bulk_beta_modify_access', kwargs={'course_id': unicode(course_key)}),
'list_course_role_members_url': reverse('list_course_role_members', kwargs={'course_id': unicode(course_key)}),
'modify_access_url': reverse('modify_access', kwargs={'course_id': unicode(course_key)}),
'list_forum_members_url': reverse('list_forum_members', kwargs={'course_id': unicode(course_key)}),
'update_forum_role_membership_url': reverse('update_forum_role_membership', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_course_info(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'course_info',
'section_display_name': _('Course Info'),
'access': access,
'course_id': course_key,
'course_display_name': course.display_name,
'has_started': course.has_started(),
'has_ended': course.has_ended(),
'start_date': get_default_time_display(course.start),
'end_date': get_default_time_display(course.end) or _('No end date set'),
'num_sections': len(course.children),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
}
if settings.FEATURES.get('DISPLAY_ANALYTICS_ENROLLMENTS'):
section_data['enrollment_count'] = CourseEnrollment.objects.enrollment_counts(course_key)
if settings.ANALYTICS_DASHBOARD_URL:
dashboard_link = _get_dashboard_link(course_key)
message = _("Enrollment data is now available in {dashboard_link}.").format(dashboard_link=dashboard_link)
section_data['enrollment_message'] = message
if settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'):
section_data['detailed_gitlogs_url'] = reverse('gitlogs_detail', kwargs={'course_id': unicode(course_key)})
try:
sorted_cutoffs = sorted(course.grade_cutoffs.items(), key=lambda i: i[1], reverse=True)
advance = lambda memo, (letter, score): "{}: {}, ".format(letter, score) + memo
section_data['grade_cutoffs'] = reduce(advance, sorted_cutoffs, "")[:-2]
except Exception: # pylint: disable=broad-except
section_data['grade_cutoffs'] = "Not Available"
# section_data['offline_grades'] = offline_grades_available(course_key)
try:
section_data['course_errors'] = [(escape(a), '') for (a, _unused) in modulestore().get_course_errors(course.id)]
except Exception: # pylint: disable=broad-except
section_data['course_errors'] = [('Error fetching errors', '')]
return section_data
def _section_membership(course, access, is_white_label):
""" Provide data for the corresponding dashboard section """
course_key = course.id
ccx_enabled = settings.FEATURES.get('CUSTOM_COURSES_EDX', False) and course.enable_ccx
section_data = {
'section_key': 'membership',
'section_display_name': _('Membership'),
'access': access,
'ccx_is_enabled': ccx_enabled,
'is_white_label': is_white_label,
'enroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'unenroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'upload_student_csv_button_url': reverse('register_and_enroll_students', kwargs={'course_id': unicode(course_key)}),
'modify_beta_testers_button_url': reverse('bulk_beta_modify_access', kwargs={'course_id': unicode(course_key)}),
'list_course_role_members_url': reverse('list_course_role_members', kwargs={'course_id': unicode(course_key)}),
'modify_access_url': reverse('modify_access', kwargs={'course_id': unicode(course_key)}),
'list_forum_members_url': reverse('list_forum_members', kwargs={'course_id': unicode(course_key)}),
'update_forum_role_membership_url': reverse('update_forum_role_membership', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_cohort_management(course, access):
""" Provide data for the corresponding cohort management section """
course_key = course.id
section_data = {
'section_key': 'cohort_management',
'section_display_name': _('Cohorts'),
'access': access,
'course_cohort_settings_url': reverse(
'course_cohort_settings',
kwargs={'course_key_string': unicode(course_key)}
),
'cohorts_url': reverse('cohorts', kwargs={'course_key_string': unicode(course_key)}),
'upload_cohorts_csv_url': reverse('add_users_to_cohorts', kwargs={'course_id': unicode(course_key)}),
'discussion_topics_url': reverse('cohort_discussion_topics', kwargs={'course_key_string': unicode(course_key)}),
}
return section_data
def _is_small_course(course_key):
""" Compares against MAX_ENROLLMENT_INSTR_BUTTONS to determine if course enrollment is considered small. """
is_small_course = False
enrollment_count = CourseEnrollment.objects.num_enrolled_in(course_key)
max_enrollment_for_buttons = settings.FEATURES.get("MAX_ENROLLMENT_INSTR_BUTTONS")
if max_enrollment_for_buttons is not None:
is_small_course = enrollment_count <= max_enrollment_for_buttons
return is_small_course
def _section_student_admin(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
is_small_course = _is_small_course(course_key)
section_data = {
'section_key': 'student_admin',
'section_display_name': _('Student Admin'),
'access': access,
'is_small_course': is_small_course,
'get_student_progress_url_url': reverse('get_student_progress_url', kwargs={'course_id': unicode(course_key)}),
'enrollment_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_url': reverse('reset_student_attempts', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_for_entrance_exam_url': reverse(
'reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'rescore_problem_url': reverse('rescore_problem', kwargs={'course_id': unicode(course_key)}),
'rescore_entrance_exam_url': reverse('rescore_entrance_exam', kwargs={'course_id': unicode(course_key)}),
'student_can_skip_entrance_exam_url': reverse(
'mark_student_can_skip_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_entrace_exam_instructor_tasks_url': reverse('list_entrance_exam_instructor_tasks',
kwargs={'course_id': unicode(course_key)}),
'spoc_gradebook_url': reverse('spoc_gradebook', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_extensions(course):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'extensions',
'section_display_name': _('Extensions'),
'units_with_due_dates': [(title_or_url(unit), unicode(unit.location))
for unit in get_units_with_due_date(course)],
'change_due_date_url': reverse('change_due_date', kwargs={'course_id': unicode(course.id)}),
'reset_due_date_url': reverse('reset_due_date', kwargs={'course_id': unicode(course.id)}),
'show_unit_extensions_url': reverse('show_unit_extensions', kwargs={'course_id': unicode(course.id)}),
'show_student_extensions_url': reverse('show_student_extensions', kwargs={'course_id': unicode(course.id)}),
}
return section_data
def _section_data_download(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
show_proctored_report_button = (
settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False) and
course.enable_proctored_exams
)
section_data = {
'section_key': 'data_download',
'section_display_name': _('Data Download'),
'access': access,
'show_generate_proctored_exam_report_button': show_proctored_report_button,
'get_problem_responses_url': reverse('get_problem_responses', kwargs={'course_id': unicode(course_key)}),
'get_grading_config_url': reverse('get_grading_config', kwargs={'course_id': unicode(course_key)}),
'get_students_features_url': reverse('get_students_features', kwargs={'course_id': unicode(course_key)}),
'get_issued_certificates_url': reverse(
'get_issued_certificates', kwargs={'course_id': unicode(course_key)}
),
'get_students_who_may_enroll_url': reverse(
'get_students_who_may_enroll', kwargs={'course_id': unicode(course_key)}
),
'get_anon_ids_url': reverse('get_anon_ids', kwargs={'course_id': unicode(course_key)}),
'list_proctored_results_url': reverse('get_proctored_exam_results', kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_report_downloads_url': reverse('list_report_downloads', kwargs={'course_id': unicode(course_key)}),
'calculate_grades_csv_url': reverse('calculate_grades_csv', kwargs={'course_id': unicode(course_key)}),
'problem_grade_report_url': reverse('problem_grade_report', kwargs={'course_id': unicode(course_key)}),
'course_has_survey': True if course.course_survey_name else False,
'course_survey_results_url': reverse('get_course_survey_results', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_grade_centre(course, access, class_code_list):
""" Provide data for the corresponding dashboard section """
course_key = course.id
assignment_names = get_assignment_names(course_key)
section_data = {
'section_key': 'data_download',
'section_display_name': _('Grade Centre'),
'access': access,
'gradebook_url': reverse('teacher_gradebook_min', kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_report_downloads_url': reverse('list_report_downloads', kwargs={'course_id': unicode(course_key)}),
'calculate_grades_csv_url': reverse('calculate_grades_csv_class_code', kwargs={'course_id': unicode(course_key)}),
'problem_grade_report_url': reverse('problem_grade_report', kwargs={'course_id': unicode(course_key)}),
'submissions_report_url': reverse('submissions_report', kwargs={'course_id': unicode(course_key)}),
'download_class_submissions_url': reverse('download_class_submissions', kwargs={'course_id': unicode(course_key)}),
'class_code_list': class_code_list,
'assignment_names': assignment_names,
}
return section_data
def _section_competition_submission(course, access, class_code_list):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'competition_submission',
'section_display_name': _('Competition'),
'access': access,
#'submit_entry_url': reverse('', kwargs={'course_id': unicode(course_key)}),
'upload_file_url': 'upload_file',
'class_code_list': class_code_list,
'competition_submission_form': CompetitionSubmissionForm(),
'video_upload_errors': "",
'video_url_errors': "",
'media_release_upload_errors': "",
'src_upload_errors': "",
'success': False,
'error_msg': False,
}
return section_data
def null_applicable_aside_types(block): # pylint: disable=unused-argument
"""
get_aside method for monkey-patching into applicable_aside_types
while rendering an HtmlDescriptor for email text editing. This returns
an empty list.
"""
return []
def _section_send_email(course, access):
""" Provide data for the corresponding bulk email section """
course_key = course.id
# Monkey-patch applicable_aside_types to return no asides for the duration of this render
with patch.object(course.runtime, 'applicable_aside_types', null_applicable_aside_types):
# This HtmlDescriptor is only being used to generate a nice text editor.
html_module = HtmlDescriptor(
course.system,
DictFieldData({'data': ''}),
ScopeIds(None, None, None, course_key.make_usage_key('html', 'fake'))
)
fragment = course.system.render(html_module, 'studio_view')
fragment = wrap_xblock(
'LmsRuntime', html_module, 'studio_view', fragment, None,
extra_data={"course-id": unicode(course_key)},
usage_id_serializer=lambda usage_id: quote_slashes(unicode(usage_id)),
# Generate a new request_token here at random, because this module isn't connected to any other
# xblock rendering.
request_token=uuid.uuid1().get_hex()
)
email_editor = fragment.content
section_data = {
'section_key': 'send_email',
'section_display_name': _('Email'),
'access': access,
'send_email': reverse('send_email', kwargs={'course_id': unicode(course_key)}),
'editor': email_editor,
'list_instructor_tasks_url': reverse(
'list_instructor_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_background_tasks_url': reverse(
'list_background_email_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_content_history_url': reverse(
'list_email_content', kwargs={'course_id': unicode(course_key)}
),
}
return section_data
def _get_dashboard_link(course_key):
""" Construct a URL to the external analytics dashboard """
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link = u"<a href=\"{0}\" target=\"_blank\">{1}</a>".format(analytics_dashboard_url,
settings.ANALYTICS_DASHBOARD_NAME)
return link
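# Example output (hypothetical settings): with ANALYTICS_DASHBOARD_URL = 'https://insights.example.com'
# and ANALYTICS_DASHBOARD_NAME = 'Insights', this returns
# <a href="https://insights.example.com/courses/<course_key>" target="_blank">Insights</a>.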
def _section_analytics(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link_start = "<a href=\"{}\" target=\"_blank\">".format(analytics_dashboard_url)
insights_message = _("For analytics about your course, go to {analytics_dashboard_name}.")
insights_message = insights_message.format(
analytics_dashboard_name=u'{0}{1}</a>'.format(link_start, settings.ANALYTICS_DASHBOARD_NAME)
)
section_data = {
'section_key': 'instructor_analytics',
'section_display_name': _('Analytics'),
'access': access,
'insights_message': insights_message,
}
return section_data
def _section_metrics(course, access):
"""Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'metrics',
'section_display_name': _('Metrics'),
'access': access,
'course_id': unicode(course_key),
'sub_section_display_name': get_section_display_name(course_key),
'section_has_problem': get_array_section_has_problem(course_key),
'get_students_opened_subsection_url': reverse('get_students_opened_subsection'),
'get_students_problem_grades_url': reverse('get_students_problem_grades'),
'post_metrics_data_csv_url': reverse('post_metrics_data_csv'),
}
return section_data
@xframe_options_exempt
@staff_member_required
def tawk_admin(request):
context = {}
return render_to_response('instructor/tawk_admin.html', context)
def get_user_school_summary(request):
if not (request.user.is_staff or request.user.is_superuser):
return HttpResponseForbidden()
identifier = request.GET.get("identifier")
try:
student = get_student_from_identifier(identifier)
response = {"success": True,
"data" : _get_user_info(student)}
except User.DoesNotExist:
response = {"success": False,
"msg": "No user found"}
return JsonResponse(response)
def _get_user_info(user):
response = {}
response["username"]= user.username
response["email"]= user.email
response["gender"]= user.profile.gender
response["first_name"]= user.first_name
#add first name, username, email, gender
if is_teacher(user):
#account type
#show school
response["last_name"]= user.last_name
response["user_type"] = "teacher"
response["school"] = user.teacherprofile.school.__unicode__()
response["classes"] = []
classes = get_my_classes(user)
#show classes by classcode and how many students in each
for c in classes:
c_dict = {}
c_dict["class_code"]= c.class_code
c_dict["size"]= get_class_size(c,is_active=True)
response["classes"].append(c_dict)
#if user has studentprofile
elif is_student(user):
response["user_type"] = "student"
response["grade"] = user.studentprofile.school_grade
#account type
classes = get_student_class_info(user)
if not classes:
classes = "Orphaned account. This student has no class"
response["classes"] = classes
return response
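# Sketch of the payload built above (field names from the code, values hypothetical):
# a teacher yields {"username", "email", "gender", "first_name", "last_name",
# "user_type": "teacher", "school", "classes": [{"class_code", "size"}, ...]};
# a student yields {"username", "email", "gender", "first_name",
# "user_type": "student", "grade", "classes"}.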
|
MakeHer/edx-platform
|
lms/djangoapps/instructor/views/instructor_dashboard.py
|
Python
|
agpl-3.0
| 52,947
|
[
"VisIt"
] |
fa0f286da66df963d6c07a5275f96a9c48e58712671bc1d9c088bd389cae7c1e
|
'''
Created on Jul 22, 2011
@author: mkiyer
'''
'''
Created on Jan 30, 2011
@author: mkiyer
chimerascan: chimeric transcript discovery using RNA-seq
Copyright (C) 2011 Matthew Iyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import logging
import os
from chimerascan import pysam
from chimerascan.lib import config
from chimerascan.lib.sam import parse_pe_reads
from chimerascan.lib.chimera import Chimera
def to_fastq(qname, readnum, seq, qual):
return "@%s/%d\n%s\n+\n%s" % (qname, readnum+1, seq, qual)
def nominate_encomp_spanning_reads(chimera_file, output_fastq_file):
"""
find all encompassing reads that should be remapped to see if they
span the breakpoint junction
"""
fqfh = open(output_fastq_file, "w")
remap_qnames = set()
for c in Chimera.parse(open(chimera_file)):
# find breakpoint coords of chimera
end5p = c.partner5p.end
start3p = c.partner3p.start
for r5p,r3p in c.encomp_read_pairs:
# if 5' read overlaps breakpoint then it should be remapped
if r5p.clipstart < end5p < r5p.clipend:
key5p = (r5p.qname, r5p.readnum)
if key5p not in remap_qnames:
remap_qnames.add((r5p.qname, r5p.readnum))
print >>fqfh, to_fastq(r5p.qname, r5p.readnum,
r5p.seq, "I" * len(r5p.seq))
# if 3' read overlaps breakpoint then it should be remapped
if r3p.clipstart < start3p < r3p.clipend:
key3p = (r3p.qname, r3p.readnum)
if key3p not in remap_qnames:
remap_qnames.add((r3p.qname, r3p.readnum))
print >>fqfh, to_fastq(r3p.qname, r3p.readnum,
r3p.seq, "I" * len(r3p.seq))
fqfh.close()
return config.JOB_SUCCESS
def nominate_unmapped_spanning_reads(unmapped_bam_file, output_fastq_file):
# find all reads that need to be remapped to see if they span the
# breakpoint junction
fqfh = open(output_fastq_file, "w")
# check read pairs with one or both unmapped, and remap those
# as well
bamfh = pysam.Samfile(unmapped_bam_file, "rb")
for pe_reads in parse_pe_reads(bamfh):
# remap all unmapped reads
for readnum,reads in enumerate(pe_reads):
if any(r.is_unmapped for r in reads):
print >>fqfh, to_fastq(pe_reads[readnum][0].qname, readnum,
pe_reads[readnum][0].seq,
pe_reads[readnum][0].qual)
bamfh.close()
fqfh.close()
return config.JOB_SUCCESS
def main():
from optparse import OptionParser
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
parser = OptionParser("usage: %prog [options] <chimeras.txt> "
"<unmapped_reads.bam> <encomp_remap.fq> "
"<unmapped_remap.fq>")
options, args = parser.parse_args()
chimera_file = args[0]
bam_file = args[1]
encomp_remap_fastq_file = args[2]
spanning_fastq_file = args[3]
nominate_encomp_spanning_reads(chimera_file, encomp_remap_fastq_file)
nominate_unmapped_spanning_reads(bam_file, spanning_fastq_file)
if __name__ == '__main__':
main()
|
tectronics/chimerascan
|
chimerascan/deprecated/nominate_spanning_reads_v03.py
|
Python
|
gpl-3.0
| 3,971
|
[
"pysam"
] |
ecfecb548150a81936e6fc9e041fff37d88477939c94547e4239d39e80b1f2f9
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Unit tests for data_ingestion.py script."""
#
# (C) Pywikibot team, 2012-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
import os
from tests import _data_dir
from tests import _images_dir
from tests.aspects import unittest, TestCase, ScriptMainTestCase
from scripts import data_ingestion
class TestPhoto(TestCase):
"""Test Photo class."""
sites = {
'wm-upload': {
'hostname': 'upload.wikimedia.org',
},
'commons': {
'family': 'commons',
'code': 'commons',
},
}
def setUp(self):
super(TestPhoto, self).setUp()
self.obj = data_ingestion.Photo(URL='http://upload.wikimedia.org/wikipedia/commons/f/fc/MP_sounds.png',
metadata={'description.en': '"Sounds" icon',
'source': 'http://commons.wikimedia.org/wiki/File:Sound-icon.svg',
'author': 'KDE artists | Silstor',
'license': 'LGPL',
'set': 'Crystal SVG icon set',
'name': 'Sound icon'},
site=self.get_site('commons'))
def test_downloadPhoto(self):
"""Test download from http://upload.wikimedia.org/."""
with open(os.path.join(_images_dir, 'MP_sounds.png'), 'rb') as f:
self.assertEqual(f.read(), self.obj.downloadPhoto().read())
def test_findDuplicateImages(self):
"""Test finding duplicates on Wikimedia Commons."""
duplicates = self.obj.findDuplicateImages()
self.assertIn('MP sounds.png', [dup.replace("_", " ") for dup in duplicates])
def test_getTitle(self):
self.assertEqual(self.obj.getTitle("%(name)s - %(set)s.%(_ext)s"), "Sound icon - Crystal SVG icon set.png")
def test_getDescription(self):
self.assertEqual(self.obj.getDescription('CrystalTemplate'),
"""{{CrystalTemplate
|author=KDE artists {{!}} Silstor
|description.en="Sounds" icon
|license=LGPL
|name=Sound icon
|set=Crystal SVG icon set
|source=http://commons.wikimedia.org/wiki/File:Sound-icon.svg
}}""") # noqa
class TestCSVReader(TestCase):
"""Test CSVReader class."""
family = 'commons'
code = 'commons'
def setUp(self):
super(TestCSVReader, self).setUp()
with open(os.path.join(_data_dir, 'csv_ingestion.csv')) as fileobj:
self.iterator = data_ingestion.CSVReader(fileobj, 'url',
site=self.get_site())
self.obj = next(self.iterator)
def test_PhotoURL(self):
self.assertEqual(self.obj.URL, 'http://upload.wikimedia.org/wikipedia/commons/f/fc/MP_sounds.png')
def test_getTitle(self):
self.assertEqual(self.obj.getTitle("%(name)s - %(set)s.%(_ext)s"), "Sound icon - Crystal SVG icon set.png")
def test_getDescription(self):
self.assertEqual(self.obj.getDescription('CrystalTemplate'),
"""{{CrystalTemplate
|author=KDE artists {{!}} Silstor
|description.en="Sounds" icon
|license=LGPL
|name=Sound icon
|set=Crystal SVG icon set
|source=http://commons.wikimedia.org/wiki/File:Sound-icon.svg
|url=http://upload.wikimedia.org/wikipedia/commons/f/fc/MP_sounds.png
}}""") # noqa
class TestDataIngestionBot(ScriptMainTestCase):
"""Test TestDataIngestionBot class."""
family = 'test'
code = 'test'
def test_existing_file(self):
"""Test uploading a file that already exists."""
data_ingestion.main(
'-csvdir:tests/data',
'-page:User:John_Vandenberg/data_ingestion_test_template')
if __name__ == "__main__":
unittest.main()
|
emijrp/pywikibot-core
|
tests/data_ingestion_tests.py
|
Python
|
mit
| 3,897
|
[
"CRYSTAL"
] |
cd9271c965fa2e10f16948d6703c245a0a6156d99171d4fd8d60e3dc327debcb
|
""" Test_RSS_Policy_DTPolicy
"""
__RCSID__ = '$Id: $'
from mock import MagicMock
import unittest
from DIRAC import gLogger
import DIRAC.ResourceStatusSystem.Policy.DowntimePolicy as moduleTested
################################################################################
class DTPolicy_TestCase( unittest.TestCase ):
def setUp( self ):
""" Setup
"""
gLogger.setLevel( 'DEBUG' )
self.moduleTested = moduleTested
self.testClass = self.moduleTested.DowntimePolicy
self.DTCommand = MagicMock()
def tearDown( self ):
""" TearDown
"""
del self.testClass
del self.moduleTested
################################################################################
# Tests
class DTPolicy_Success( DTPolicy_TestCase ):
def test_instantiate( self ):
""" tests that we can instantiate one object of the tested class
"""
policy = self.testClass()
self.assertEqual( 'DowntimePolicy', policy.__class__.__name__ )
def test_evaluate( self ):
""" tests the evaluate method
"""
policy = self.testClass()
# command failing
self.DTCommand.doCommand.return_value = { 'OK' : False, 'Message' : 'Grumpy command' }
policy.setCommand( self.DTCommand )
res = policy.evaluate()
self.assertTrue(res['OK'])
self.assertEqual( 'Grumpy command', res['Value']['Reason'] )
self.assertEqual( 'Error', res['Value']['Status'] )
# command failing /2
self.DTCommand.doCommand.return_value = { 'OK' : True, 'Value' : {'Severity': 'XYZ',
'EndDate' : 'Y',
'DowntimeID': '123',
'Description': 'blah' } }
self.assertEqual( 'Error', res['Value']['Status'] )
res = policy.evaluate()
self.assertTrue( res[ 'OK' ] )
# command result empty
self.DTCommand.doCommand.return_value = {'OK': True, 'Value': None}
res = policy.evaluate()
self.assertTrue( res[ 'OK' ] )
self.assertEqual( 'Active', res[ 'Value' ][ 'Status' ] )
self.assertEqual( 'No DownTime announced', res[ 'Value' ][ 'Reason' ] )
# command result with a DT
self.DTCommand.doCommand.return_value = { 'OK' : True, 'Value' : {'Severity':'OUTAGE',
'EndDate':'Y',
'DowntimeID': '123',
'Description': 'blah' }}
policy.command = self.DTCommand
res = policy.evaluate()
self.assertTrue(res['OK'])
self.assertEqual( 'Banned', res[ 'Value' ][ 'Status' ] )
self.assertEqual( '123 blah', res[ 'Value' ][ 'Reason' ] )
# command mock
self.DTCommand.doCommand.return_value = { 'OK' : True, 'Value' : {'Severity': 'WARNING',
'EndDate': 'Y',
'DowntimeID': '123',
'Description': 'blah' }}
policy.command = self.DTCommand
res = policy.evaluate()
self.assertTrue(res['OK'])
self.assertEqual( 'Degraded', res[ 'Value' ][ 'Status' ] )
self.assertEqual( '123 blah', res[ 'Value' ][ 'Reason' ] )
################################################################################
################################################################################
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase( DTPolicy_TestCase )
suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( DTPolicy_Success ) )
testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
andresailer/DIRAC
|
ResourceStatusSystem/Policy/test/Test_RSS_Policy_DTPolicy.py
|
Python
|
gpl-3.0
| 3,951
|
[
"DIRAC"
] |
4e453ac4dda44431f015cfbfeab0de21c395ae841ab61497044082abc2449c20
|
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""Implement Roulette Wheel selection on a population.
This implements Roulette Wheel selection in which individuals are
selected from a population randomly, with their proportion of selection
based on their relative fitness in the population.
"""
# standard modules
import random
import copy
# local modules
from .Abstract import AbstractSelection
class RouletteWheelSelection(AbstractSelection):
"""Roulette wheel selection proportional to individuals fitness.
The implements a roulette wheel selector that selects individuals
from the population, and performs mutation and crossover on
the selected individuals.
"""
def __init__(self, mutator, crossover, repairer=None):
"""Initialize the selector.
Arguments:
o mutator -- A Mutation object which will perform mutation
on an individual.
o crossover -- A Crossover object which will take two
individuals and produce two new individuals which may
have had crossover occur.
o repairer -- A class which can do repair on rearranged genomes
to eliminate infeasible individuals. If set to None, no repair
will be done.
"""
AbstractSelection.__init__(self, mutator, crossover, repairer)
def select(self, population):
"""Perform selection on the population based using a Roulette model.
Arguments:
o population -- A population of organisms on which we will perform
selection. The individuals are assumed to have fitness values which
are due to their current genome.
"""
# set up the current probabilities for selecting organisms
# from the population
prob_wheel = self._set_up_wheel(population)
probs = sorted(prob_wheel)
# now create the new population with the same size as the original
new_population = []
for pair_spin in range(len(population) // 2):
# select two individuals using roulette wheel selection
choice_num_1 = random.random()
choice_num_2 = random.random()
# now grab the two organisms from the probabilities
chosen_org_1 = None
chosen_org_2 = None
prev_prob = 0
for cur_prob in probs:
if choice_num_1 > prev_prob and choice_num_1 <= cur_prob:
chosen_org_1 = prob_wheel[cur_prob]
if choice_num_2 > prev_prob and choice_num_2 <= cur_prob:
chosen_org_2 = prob_wheel[cur_prob]
prev_prob = cur_prob
assert chosen_org_1 is not None, "Didn't select organism one"
assert chosen_org_2 is not None, "Didn't select organism two"
# do mutation and crossover to get the new organisms
new_org_1, new_org_2 = self.mutate_and_crossover(chosen_org_1,
chosen_org_2)
new_population.extend([new_org_1, new_org_2])
return new_population
def _set_up_wheel(self, population):
"""Set up the roulette wheel based on the fitnesses.
This creates a fitness proportional 'wheel' that will be used for
selecting based on random numbers.
Returns:
o A dictionary where the keys are the 'high' value that an
individual will be selected. The low value is determined by
the previous key in a sorted list of keys. For instance, if we
have a sorted list of keys like:
[.1, .3, .7, 1]
Then the individual whose key is .1 will be selected if a number
between 0 and .1 is chosen, the individual whose key is .3 will
be selected if the number is between .1 and .3, and so on.
The values of the dictionary are the organism instances.
"""
# first sum up the total fitness in the population
total_fitness = 0
for org in population:
total_fitness += org.fitness
# now create the wheel dictionary for all of the individuals
wheel_dict = {}
total_percentage = 0
for org in population:
org_percentage = float(org.fitness) / float(total_fitness)
# the organisms chance of being picked goes from the previous
# percentage (total_percentage) to the previous percentage
# plus the organisms specific fitness percentage
wheel_dict[total_percentage + org_percentage] = copy.copy(org)
# keep a running total of where we are at in the percentages
total_percentage += org_percentage
return wheel_dict
|
updownlife/multipleK
|
dependencies/biopython-1.65/build/lib.linux-x86_64-2.7/Bio/GA/Selection/RouletteWheel.py
|
Python
|
gpl-2.0
| 4,824
|
[
"Biopython"
] |
f5d7b954cb2403eda253bad707c936b2cfafdf33a8e53da0907b9a87149e5b53
|
#!/usr/bin/env python
########################################################################
# File : dirac-admin-get-pilot-logging-info.py
# Author : Stuart Paterson
########################################################################
"""
Retrieve logging info of a Grid pilot
Usage:
dirac-admin-get-pilot-logging-info [options] ... PilotID ...
Arguments:
PilotID: Grid ID of the pilot
Example:
$ dirac-admin-get-pilot-logging-info https://marlb.in2p3.fr:9000/26KCLKBFtxXKHF4_ZrQjkw
Pilot Reference: dirac-admin-get-pilot-logging-info https://marlb.in2p3.fr:9000/26KCLKBFtxXKHF4_ZrQjkw
===================== glite-job-logging-info Success =====================
LOGGING INFORMATION:
Printing info for the Job : https://marlb.in2p3.fr:9000/26KCLKBFtxXKHF4_ZrQjkw
---
Event: RegJob
- Arrived = Mon Feb 21 13:27:50 2011 CET
- Host = marwms.in2p3.fr
- Jobtype = SIMPLE
- Level = SYSTEM
- Ns = https://marwms.in2p3.fr:7443/glite_wms_wmproxy_server
- Nsubjobs = 0
- Parent = https://marlb.in2p3.fr:9000/WQHVOB1mI4oqrlYz2ZKtgA
- Priority = asynchronous
- Seqcode = UI=000000:NS=0000000001:WM=000000:BH=0000000000:JSS=000000:LM=000000:LRMS=000000:APP=000000:LBS=000000
- Source = NetworkServer
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__RCSID__ = "$Id$"
# pylint: disable=wrong-import-position
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
@DIRACScript()
def main():
Script.parseCommandLine(ignoreErrors=True)
args = Script.getPositionalArgs()
if len(args) < 1:
Script.showHelp()
from DIRAC import exit as DIRACExit
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
diracAdmin = DiracAdmin()
exitCode = 0
errorList = []
for gridID in args:
result = diracAdmin.getPilotLoggingInfo(gridID)
if not result['OK']:
errorList.append((gridID, result['Message']))
exitCode = 2
else:
print('Pilot Reference: %s' % gridID)
print(result['Value'])
print()
for error in errorList:
print("ERROR %s: %s" % error)
DIRACExit(exitCode)
if __name__ == "__main__":
main()
|
yujikato/DIRAC
|
src/DIRAC/Interfaces/scripts/dirac_admin_get_pilot_logging_info.py
|
Python
|
gpl-3.0
| 2,247
|
[
"DIRAC"
] |
b68afa798d596070af18e5dc1162c16cdc63158715f93f5c4341f04115ca94bc
|
"""
Structure connectivity class.
"""
import collections
import logging
import networkx as nx
import numpy as np
from monty.json import MSONable, jsanitize
from pymatgen.analysis.chemenv.connectivity.connected_components import (
ConnectedComponent,
)
from pymatgen.analysis.chemenv.connectivity.environment_nodes import (
get_environment_node,
)
from pymatgen.analysis.chemenv.coordination_environments.structure_environments import (
LightStructureEnvironments,
)
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Geoffroy Hautier"
__version__ = "1.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "June 25, 2019"
def get_delta_image(isite1, isite2, data1, data2):
"""
Helper method to get the delta image between one environment and another
from the ligand's delta images.
"""
if data1["start"] == isite1:
if data2["start"] == isite2:
return np.array(data1["delta"]) - np.array(data2["delta"])
return np.array(data1["delta"]) + np.array(data2["delta"])
if data2["start"] == isite2:
return -np.array(data1["delta"]) - np.array(data2["delta"])
return -np.array(data1["delta"]) + np.array(data2["delta"])
class StructureConnectivity(MSONable):
"""
Main class containing the connectivity of a structure.
"""
def __init__(
self,
light_structure_environment,
connectivity_graph=None,
environment_subgraphs=None,
):
"""
Constructor for the StructureConnectivity object.
Args:
light_structure_environment: a LightStructureEnvironments object
containing the relevant local environments
for the sites in the structure.
connectivity_graph: the networkx MultiGraph if it has already been computed,
e.g. stored in a file or dict and StructureConnectivity
is reconstructed from that file or dict.
environment_subgraphs: the different subgraphs of environments that have
been computed if any (as for connectivity_graph, only
if it is reconstructed from a file or dict).
"""
self.light_structure_environments = light_structure_environment
if connectivity_graph is None:
self._graph = nx.MultiGraph()
else:
self._graph = connectivity_graph
if environment_subgraphs is None:
self.environment_subgraphs = {}
else:
self.environment_subgraphs = environment_subgraphs
def environment_subgraph(self, environments_symbols=None, only_atoms=None):
"""
Args:
environments_symbols ():
only_atoms ():
Returns:
"""
if environments_symbols is not None:
self.setup_environment_subgraph(environments_symbols=environments_symbols, only_atoms=only_atoms)
try:
return self._environment_subgraph
except AttributeError:
all_envs = self.light_structure_environments.environments_identified()
self.setup_environment_subgraph(environments_symbols=all_envs, only_atoms=only_atoms)
return self._environment_subgraph
def add_sites(self):
"""
Add the sites in the structure connectivity graph.
"""
self._graph.add_nodes_from(list(range(len(self.light_structure_environments.structure))))
def add_bonds(self, isite, site_neighbors_set):
"""
Add the bonds for a given site index to the structure connectivity graph.
Args:
isite: Index of the site for which the bonds have to be added.
site_neighbors_set: site_neighbors_set: Neighbors set of the site
"""
existing_edges = self._graph.edges(nbunch=[isite], data=True)
for nb_index_and_image in site_neighbors_set.neighb_indices_and_images:
nb_index_unitcell = nb_index_and_image["index"]
nb_image_cell = nb_index_and_image["image_cell"]
exists = False
if np.allclose(nb_image_cell, np.zeros(3)):
for (isite1, ineighb1, data1) in existing_edges:
if np.allclose(data1["delta"], np.zeros(3)) and nb_index_unitcell == ineighb1:
exists = True
break
else:
if isite == nb_index_unitcell:
for (isite1, ineighb1, data1) in existing_edges:
if isite1 == ineighb1:
if np.allclose(data1["delta"], nb_image_cell) or np.allclose(
data1["delta"], -nb_image_cell
):
exists = True
break
else:
for (isite1, ineighb1, data1) in existing_edges:
if nb_index_unitcell == ineighb1:
if data1["start"] == isite:
if np.allclose(data1["delta"], nb_image_cell):
exists = True
break
elif data1["end"] == isite:
if np.allclose(data1["delta"], -nb_image_cell):
exists = True
break
else:
raise ValueError("SHOULD NOT HAPPEN ???")
if not exists:
self._graph.add_edge(
isite,
nb_index_unitcell,
start=isite,
end=nb_index_unitcell,
delta=nb_image_cell,
)
def setup_environment_subgraph(self, environments_symbols, only_atoms=None):
"""
Set up the graph for predefined environments and optionally atoms.
Args:
environments_symbols: Symbols of the environments for the environment subgraph.
only_atoms: Atoms to be considered.
"""
logging.info(f"Setup of environment subgraph for environments {', '.join(environments_symbols)}")
if not isinstance(environments_symbols, collections.abc.Iterable):
environments_symbols = [environments_symbols]
environments_symbols = sorted(environments_symbols)
envs_string = "-".join(environments_symbols)
if only_atoms is not None:
envs_string += "#" + "-".join(sorted(only_atoms))
# Get it directly if it was already computed
if envs_string in self.environment_subgraphs:
self._environment_subgraph = self.environment_subgraphs[envs_string]
return
# Initialize graph for a subset of environments
self._environment_subgraph = nx.MultiGraph()
# Add the sites with the required environment(s)
for isite, ce_this_site_all in enumerate(self.light_structure_environments.coordination_environments):
if ce_this_site_all is None:
continue
if len(ce_this_site_all) == 0:
continue
ce_this_site = ce_this_site_all[0]["ce_symbol"]
if ce_this_site in environments_symbols:
if only_atoms is None:
env_node = get_environment_node(
self.light_structure_environments.structure[isite],
isite,
ce_this_site,
)
self._environment_subgraph.add_node(env_node)
else:
if self.light_structure_environments.structure.is_ordered:
if self.light_structure_environments.structure[isite].specie.symbol in only_atoms:
env_node = get_environment_node(
self.light_structure_environments.structure[isite],
isite,
ce_this_site,
)
self._environment_subgraph.add_node(env_node)
else:
# TODO: add the possibility of a "constraint" on the minimum percentage
# of the atoms on the site
this_site_elements = [
sp.symbol for sp in self.light_structure_environments.structure[isite].species_and_occu
]
for elem_symbol in this_site_elements:
if elem_symbol in only_atoms:
env_node = get_environment_node(
self.light_structure_environments.structure[isite],
isite,
ce_this_site,
)
self._environment_subgraph.add_node(env_node)
break
# Find the connections between the environments
nodes = list(self._environment_subgraph.nodes())
for inode1, node1 in enumerate(nodes):
isite1 = node1.isite
links_node1 = self._graph.edges(isite1, data=True)
for inode2, node2 in enumerate(nodes[inode1:]):
isite2 = node2.isite
links_node2 = self._graph.edges(isite2, data=True)
# We look for ligands that are common to both site1 and site2
connections_site1_site2 = {}
for (site1_1, ilig_site1, d1) in links_node1:
for (site2_1, ilig_site2, d2) in links_node2:
if ilig_site1 == ilig_site2:
delta_image = get_delta_image(isite1, isite2, d1, d2)
if isite1 == isite2 and np.all(delta_image == 0):
continue
tuple_delta_image = tuple(delta_image)
if tuple_delta_image in connections_site1_site2:
connections_site1_site2[tuple_delta_image].append((ilig_site1, d1, d2))
else:
connections_site1_site2[tuple_delta_image] = [(ilig_site1, d1, d2)]
# Remove the double self-loops ...
if isite1 == isite2:
remove_deltas = []
alldeltas = list(connections_site1_site2.keys())
alldeltas2 = list(connections_site1_site2.keys())
if (0, 0, 0) in alldeltas:
alldeltas.remove((0, 0, 0))
alldeltas2.remove((0, 0, 0))
for current_delta in alldeltas:
opp_current_delta = tuple(-dd for dd in current_delta)
if opp_current_delta in alldeltas2:
remove_deltas.append(current_delta)
alldeltas2.remove(current_delta)
alldeltas2.remove(opp_current_delta)
for remove_delta in remove_deltas:
connections_site1_site2.pop(remove_delta)
# Add all the edges
for conn, ligands in list(connections_site1_site2.items()):
self._environment_subgraph.add_edge(
node1,
node2,
start=node1.isite,
end=node2.isite,
delta=conn,
ligands=ligands,
)
self.environment_subgraphs[envs_string] = self._environment_subgraph
def setup_connectivity_description(self):
"""
Returns:
"""
def get_connected_components(self, environments_symbols=None, only_atoms=None):
"""
Args:
environments_symbols ():
only_atoms ():
Returns:
"""
connected_components = []
env_subgraph = self.environment_subgraph(environments_symbols=environments_symbols, only_atoms=only_atoms)
for component_nodes in nx.connected_components(env_subgraph):
graph = env_subgraph.subgraph(component_nodes).copy()
connected_components.append(ConnectedComponent.from_graph(graph))
return connected_components
def setup_atom_environment_subgraph(self, atom_environment):
"""
Args:
atom_environment ():
Returns:
"""
raise NotImplementedError()
def setup_environments_subgraph(self, environments_symbols):
"""
Args:
environments_symbols ():
Returns:
"""
raise NotImplementedError()
def setup_atom_environments_subgraph(self, atoms_environments):
"""
Args:
atoms_environments ():
Returns:
"""
raise NotImplementedError()
def print_links(self):
"""
Returns:
"""
nodes = self.environment_subgraph().nodes()
print("Links in graph :")
for node in nodes:
print(node.isite, " is connected with : ")
for (n1, n2, data) in self.environment_subgraph().edges(node, data=True):
if n1.isite == data["start"]:
print(
" - {:d} by {:d} ligands ({:d} {:d} {:d})".format(
n2.isite,
len(data["ligands"]),
data["delta"][0],
data["delta"][1],
data["delta"][2],
)
)
else:
print(
" - {:d} by {:d} ligands ({:d} {:d} {:d})".format(
n2.isite,
len(data["ligands"]),
-data["delta"][0],
-data["delta"][1],
-data["delta"][2],
)
)
def as_dict(self):
"""
Returns:
"""
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"light_structure_environments": self.light_structure_environments.as_dict(),
"connectivity_graph": jsanitize(nx.to_dict_of_dicts(self._graph)),
"environment_subgraphs": {
env_key: jsanitize(nx.to_dict_of_dicts(subgraph))
for env_key, subgraph in self.environment_subgraphs.items()
},
}
@classmethod
def from_dict(cls, d):
"""
Args:
d ():
Returns:
"""
# Reconstructs the graph with integer as nodes (json's as_dict replaces integer keys with str keys)
cgraph = nx.from_dict_of_dicts(d["connectivity_graph"], create_using=nx.MultiGraph, multigraph_input=True)
cgraph = nx.relabel_nodes(cgraph, int) # Just relabel the nodes using integer casting (maps str->int)
# Relabel multiedges (removes multiedges with str keys and adds them back with int keys)
edges = set(cgraph.edges())
for n1, n2 in edges:
new_edges = {int(iedge): edata for iedge, edata in cgraph[n1][n2].items()}
cgraph.remove_edges_from([(n1, n2, iedge) for iedge, edata in cgraph[n1][n2].items()])
cgraph.add_edges_from([(n1, n2, iedge, edata) for iedge, edata in new_edges.items()])
return cls(
LightStructureEnvironments.from_dict(d["light_structure_environments"]),
connectivity_graph=cgraph,
environment_subgraphs=None,
)
# TODO: also deserialize the environment_subgraphs
# environment_subgraphs={env_key: nx.from_dict_of_dicts(subgraph, multigraph_input=True)
# for env_key, subgraph in d['environment_subgraphs'].items()})
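# Hypothetical round-trip sketch: an instance sc can be serialized and restored with
# StructureConnectivity.from_dict(sc.as_dict()); as noted above, environment_subgraphs
# are not yet restored by from_dict.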
|
materialsproject/pymatgen
|
pymatgen/analysis/chemenv/connectivity/structure_connectivity.py
|
Python
|
mit
| 16,317
|
[
"pymatgen"
] |
432c8a285d4a364afdc24300d96721d90859763464adb08452ef7b239ee14743
|
# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import base64
import datetime
import imp
import json
import os
import shlex
import zipfile
import re
from io import BytesIO
from ansible.release import __version__, __author__
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.powershell import module_manifest as ps_manifest
from ansible.module_utils._text import to_bytes, to_text, to_native
from ansible.plugins.loader import module_utils_loader
# Must import strategy and use write_locks from there
# If we import write_locks directly then we end up binding a
# variable to the object and then it never gets updated.
from ansible.executor import action_write_locks
from ansible.utils.display import Display
display = Display()
REPLACER = b"#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
REPLACER_VERSION = b"\"<<ANSIBLE_VERSION>>\""
REPLACER_COMPLEX = b"\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
REPLACER_WINDOWS = b"# POWERSHELL_COMMON"
REPLACER_JSONARGS = b"<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
REPLACER_SELINUX = b"<<SELINUX_SPECIAL_FILESYSTEMS>>"
# We could end up writing out parameters with unicode characters so we need to
# specify an encoding for the python source file
ENCODING_STRING = u'# -*- coding: utf-8 -*-'
b_ENCODING_STRING = b'# -*- coding: utf-8 -*-'
# module_common is relative to module_utils, so fix the path
_MODULE_UTILS_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')
# ******************************************************************************
ANSIBALLZ_TEMPLATE = u'''%(shebang)s
%(coding)s
_ANSIBALLZ_WRAPPER = True # For test-module script to tell this is a ANSIBALLZ_WRAPPER
# This code is part of Ansible, but is an independent component.
# The code in this particular templatable string, and this templatable string
# only, is BSD licensed. Modules which end up using this snippet, which is
# dynamically combined together by Ansible still belong to the author of the
# module, and they may assign their own license to the complete work.
#
# Copyright (c), James Cammarata, 2016
# Copyright (c), Toshio Kuratomi, 2016
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def _ansiballz_main():
import os
import os.path
import sys
import __main__
# For some distros and python versions we pick up this script in the temporary
# directory. This leads to problems when the ansible module masks a python
# library that another import needs. We have not figured out what about the
# specific distros and python versions causes this to behave differently.
#
# Tested distros:
# Fedora23 with python3.4 Works
# Ubuntu15.10 with python2.7 Works
# Ubuntu15.10 with python3.4 Fails without this
# Ubuntu16.04.1 with python3.5 Fails without this
# To test on another platform:
# * use the copy module (since this shadows the stdlib copy module)
# * Turn off pipelining
# * Make sure that the destination file does not exist
# * ansible ubuntu16-test -m copy -a 'src=/etc/motd dest=/var/tmp/m'
# This will traceback in shutil. Looking at the complete traceback will show
# that shutil is importing copy which finds the ansible module instead of the
# stdlib module
scriptdir = None
try:
scriptdir = os.path.dirname(os.path.realpath(__main__.__file__))
except (AttributeError, OSError):
# Some platforms don't set __file__ when reading from stdin
# OSX raises OSError if using abspath() in a directory we don't have
# permission to read (realpath calls abspath)
pass
if scriptdir is not None:
sys.path = [p for p in sys.path if p != scriptdir]
import base64
import imp
import shutil
import tempfile
import zipfile
if sys.version_info < (3,):
bytes = str
MOD_DESC = ('.py', 'U', imp.PY_SOURCE)
PY3 = False
else:
unicode = str
MOD_DESC = ('.py', 'r', imp.PY_SOURCE)
PY3 = True
ZIPDATA = """%(zipdata)s"""
# Note: temp_path isn't needed once we switch to zipimport
def invoke_module(modlib_path, temp_path, json_params):
# When installed via setuptools (including python setup.py install),
# ansible may be installed with an easy-install.pth file. That file
# may load the system-wide install of ansible rather than the one in
# the module. sitecustomize is the only way to override that setting.
z = zipfile.ZipFile(modlib_path, mode='a')
# py3: modlib_path will be text, py2: it's bytes. Need bytes at the end
sitecustomize = u'import sys\\nsys.path.insert(0,"%%s")\\n' %% modlib_path
sitecustomize = sitecustomize.encode('utf-8')
# Use a ZipInfo to work around zipfile limitation on hosts with
# clocks set to a pre-1980 year (for instance, Raspberry Pi)
zinfo = zipfile.ZipInfo()
zinfo.filename = 'sitecustomize.py'
zinfo.date_time = ( %(year)i, %(month)i, %(day)i, %(hour)i, %(minute)i, %(second)i)
z.writestr(zinfo, sitecustomize)
# Note: Remove the following section when we switch to zipimport
# Write the module to disk for imp.load_module
module = os.path.join(temp_path, '__main__.py')
with open(module, 'wb') as f:
f.write(z.read('__main__.py'))
f.close()
# End pre-zipimport section
z.close()
# Put the zipped up module_utils we got from the controller first in the python path so that we
# can monkeypatch the right basic
sys.path.insert(0, modlib_path)
# Monkeypatch the parameters into basic
from ansible.module_utils import basic
basic._ANSIBLE_ARGS = json_params
%(coverage)s
# Run the module! By importing it as '__main__', it thinks it is executing as a script
with open(module, 'rb') as mod:
imp.load_module('__main__', mod, module, MOD_DESC)
# Ansible modules must exit themselves
print('{"msg": "New-style module did not handle its own exit", "failed": true}')
sys.exit(1)
def debug(command, zipped_mod, json_params):
# The code here normally doesn't run. It's only used for debugging on the
# remote machine.
#
# The subcommands in this function make it easier to debug ansiballz
# modules. Here's the basic steps:
#
# Run ansible with the environment variable: ANSIBLE_KEEP_REMOTE_FILES=1 and -vvv
# to save the module file remotely::
# $ ANSIBLE_KEEP_REMOTE_FILES=1 ansible host1 -m ping -a 'data=october' -vvv
#
# Part of the verbose output will tell you where on the remote machine the
# module was written to::
# [...]
# <host1> SSH: EXEC ssh -C -q -o ControlMaster=auto -o ControlPersist=60s -o KbdInteractiveAuthentication=no -o
# PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o ConnectTimeout=10 -o
# ControlPath=/home/badger/.ansible/cp/ansible-ssh-%%h-%%p-%%r -tt rhel7 '/bin/sh -c '"'"'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8
# LC_MESSAGES=en_US.UTF-8 /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping'"'"''
# [...]
#
# Login to the remote machine and run the module file via from the previous
# step with the explode subcommand to extract the module payload into
# source files::
# $ ssh host1
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping explode
# Module expanded into:
# /home/badger/.ansible/tmp/ansible-tmp-1461173408.08-279692652635227/ansible
#
# You can now edit the source files to instrument the code or experiment with
# different parameter values. When you're ready to run the code you've modified
# (instead of the code from the actual zipped module), use the execute subcommand like this::
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping execute
# Okay to use __file__ here because we're running from a kept file
basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'debug_dir')
args_path = os.path.join(basedir, 'args')
script_path = os.path.join(basedir, '__main__.py')
if command == 'excommunicate':
print('The excommunicate debug command is deprecated and will be removed in 2.11. Use execute instead.')
command = 'execute'
if command == 'explode':
# transform the ZIPDATA into an exploded directory of code and then
# print the path to the code. This is an easy way for people to look
# at the code on the remote machine for debugging it in that
# environment
z = zipfile.ZipFile(zipped_mod)
for filename in z.namelist():
if filename.startswith('/'):
raise Exception('Something wrong with this module zip file: should not contain absolute paths')
dest_filename = os.path.join(basedir, filename)
if dest_filename.endswith(os.path.sep) and not os.path.exists(dest_filename):
os.makedirs(dest_filename)
else:
directory = os.path.dirname(dest_filename)
if not os.path.exists(directory):
os.makedirs(directory)
f = open(dest_filename, 'wb')
f.write(z.read(filename))
f.close()
# write the args file
f = open(args_path, 'wb')
f.write(json_params)
f.close()
print('Module expanded into:')
print('%%s' %% basedir)
exitcode = 0
elif command == 'execute':
# Execute the exploded code instead of executing the module from the
# embedded ZIPDATA. This allows people to easily run their modified
# code on the remote machine to see how changes will affect it.
# Set pythonpath to the debug dir
sys.path.insert(0, basedir)
# read in the args file which the user may have modified
with open(args_path, 'rb') as f:
json_params = f.read()
# Monkeypatch the parameters into basic
from ansible.module_utils import basic
basic._ANSIBLE_ARGS = json_params
# Run the module! By importing it as '__main__', it thinks it is executing as a script
import imp
with open(script_path, 'r') as f:
importer = imp.load_module('__main__', f, script_path, ('.py', 'r', imp.PY_SOURCE))
# Ansible modules must exit themselves
print('{"msg": "New-style module did not handle its own exit", "failed": true}')
sys.exit(1)
else:
print('WARNING: Unknown debug command. Doing nothing.')
exitcode = 0
return exitcode
#
# See comments in the debug() method for information on debugging
#
ANSIBALLZ_PARAMS = %(params)s
if PY3:
ANSIBALLZ_PARAMS = ANSIBALLZ_PARAMS.encode('utf-8')
try:
# There's a race condition with the controller removing the
# remote_tmpdir and this module executing under async. So we cannot
# store this in remote_tmpdir (use system tempdir instead)
# Only need to use [ansible_module]_payload_ in the temp_path until we move to zipimport
# (this helps ansible-test produce coverage stats)
temp_path = tempfile.mkdtemp(prefix='ansible_%(ansible_module)s_payload_')
zipped_mod = os.path.join(temp_path, 'ansible_%(ansible_module)s_payload.zip')
with open(zipped_mod, 'wb') as modlib:
modlib.write(base64.b64decode(ZIPDATA))
if len(sys.argv) == 2:
exitcode = debug(sys.argv[1], zipped_mod, ANSIBALLZ_PARAMS)
else:
# Note: temp_path isn't needed once we switch to zipimport
invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)
finally:
try:
shutil.rmtree(temp_path)
except (NameError, OSError):
# tempdir creation probably failed
pass
sys.exit(exitcode)
if __name__ == '__main__':
_ansiballz_main()
'''
ANSIBALLZ_COVERAGE_TEMPLATE = '''
# Access to the working directory is required by coverage.
# Some platforms, such as macOS, may not allow querying the working directory when using become to drop privileges.
try:
os.getcwd()
except OSError:
os.chdir('/')
os.environ['COVERAGE_FILE'] = '%(coverage_output)s'
import atexit
import coverage
cov = coverage.Coverage(config_file='%(coverage_config)s')
def atexit_coverage():
cov.stop()
cov.save()
atexit.register(atexit_coverage)
cov.start()
'''
def _strip_comments(source):
# Strip comments and blank lines from the wrapper
buf = []
for line in source.splitlines():
stripped = line.strip()
if not stripped or stripped.startswith(u'#'):
continue
buf.append(line)
return u'\n'.join(buf)
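# For example, _strip_comments(u'# note\n\nx = 1') returns u'x = 1'.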
if C.DEFAULT_KEEP_REMOTE_FILES:
# Keep comments when KEEP_REMOTE_FILES is set. That way users will see
# the comments with some nice usage instructions
ACTIVE_ANSIBALLZ_TEMPLATE = ANSIBALLZ_TEMPLATE
else:
# ANSIBALLZ_TEMPLATE stripped of comments for smaller over the wire size
ACTIVE_ANSIBALLZ_TEMPLATE = _strip_comments(ANSIBALLZ_TEMPLATE)
class ModuleDepFinder(ast.NodeVisitor):
# Caveats:
# This code currently does not handle:
# * relative imports from py2.6+ from . import urls
IMPORT_PREFIX_SIZE = len('ansible.module_utils.')
def __init__(self, *args, **kwargs):
"""
Walk the ast tree for the python module.
Save submodule[.submoduleN][.identifier] into self.submodules
self.submodules will end up with tuples like:
- ('basic',)
- ('urls', 'fetch_url')
- ('database', 'postgres')
- ('database', 'postgres', 'quote')
It's up to calling code to determine whether the final element of the
dotted strings are module names or something else (function, class, or
variable names)
"""
super(ModuleDepFinder, self).__init__(*args, **kwargs)
self.submodules = set()
def visit_Import(self, node):
# import ansible.module_utils.MODLIB[.MODLIBn] [as asname]
for alias in (a for a in node.names if a.name.startswith('ansible.module_utils.')):
py_mod = alias.name[self.IMPORT_PREFIX_SIZE:]
py_mod = tuple(py_mod.split('.'))
self.submodules.add(py_mod)
self.generic_visit(node)
def visit_ImportFrom(self, node):
# Specialcase: six is a special case because of its
# import logic
if node.names[0].name == '_six':
self.submodules.add(('_six',))
elif node.module.startswith('ansible.module_utils'):
where_from = node.module[self.IMPORT_PREFIX_SIZE:]
if where_from:
# from ansible.module_utils.MODULE1[.MODULEn] import IDENTIFIER [as asname]
# from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [as asname]
# from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [,IDENTIFIER] [as asname]
py_mod = tuple(where_from.split('.'))
for alias in node.names:
self.submodules.add(py_mod + (alias.name,))
else:
# from ansible.module_utils import MODLIB [,MODLIB2] [as asname]
for alias in node.names:
self.submodules.add((alias.name,))
self.generic_visit(node)
def _slurp(path):
if not os.path.exists(path):
raise AnsibleError("imported module support code does not exist at %s" % os.path.abspath(path))
fd = open(path, 'rb')
data = fd.read()
fd.close()
return data
def _get_shebang(interpreter, task_vars, templar, args=tuple()):
"""
Note not stellar API:
Returns None instead of always returning a shebang line. Doing it this
way allows the caller to decide to use the shebang it read from the
file rather than trust that we reformatted what they already have
correctly.
"""
interpreter_config = u'ansible_%s_interpreter' % os.path.basename(interpreter).strip()
if interpreter_config not in task_vars:
return (None, interpreter)
interpreter = templar.template(task_vars[interpreter_config].strip())
shebang = u'#!' + interpreter
if args:
shebang = shebang + u' ' + u' '.join(args)
return (shebang, interpreter)
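# Minimal sketch (hypothetical task_vars, assuming the value holds no template expressions):
# for interpreter '/usr/bin/python' and task_vars = {'ansible_python_interpreter': '/usr/bin/python3'},
# this returns (u'#!/usr/bin/python3', u'/usr/bin/python3'); without that variable it
# returns (None, '/usr/bin/python').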
def recursive_finder(name, data, py_module_names, py_module_cache, zf):
"""
Using ModuleDepFinder, make sure we have all of the module_utils files that
the module its module_utils files needs.
"""
# Parse the module and find the imports of ansible.module_utils
try:
tree = ast.parse(data)
except (SyntaxError, IndentationError) as e:
raise AnsibleError("Unable to import %s due to %s" % (name, e.msg))
finder = ModuleDepFinder()
finder.visit(tree)
#
# Determine which of the imports we've found are modules (vs. class, function
# or variable names) for packages
#
normalized_modules = set()
# Loop through the imports that we've found to normalize them
# Exclude paths that match with paths we've already processed
# (Have to exclude them a second time once the paths are processed)
module_utils_paths = [p for p in module_utils_loader._get_paths(subdirs=False) if os.path.isdir(p)]
module_utils_paths.append(_MODULE_UTILS_PATH)
for py_module_name in finder.submodules.difference(py_module_names):
module_info = None
if py_module_name[0] == 'six':
# Special case the python six library because it messes up the
# import process in an incompatible way
module_info = imp.find_module('six', module_utils_paths)
py_module_name = ('six',)
idx = 0
elif py_module_name[0] == '_six':
# Special case the python six library because it messes up the
# import process in an incompatible way
module_info = imp.find_module('_six', [os.path.join(p, 'six') for p in module_utils_paths])
py_module_name = ('six', '_six')
idx = 0
else:
# Check whether either the last or the second to last identifier is
# a module name
for idx in (1, 2):
if len(py_module_name) < idx:
break
try:
module_info = imp.find_module(py_module_name[-idx],
[os.path.join(p, *py_module_name[:-idx]) for p in module_utils_paths])
break
except ImportError:
continue
# Could not find the module. Construct a helpful error message.
if module_info is None:
msg = ['Could not find imported module support code for %s. Looked for' % (name,)]
if idx == 2:
msg.append('either %s.py or %s.py' % (py_module_name[-1], py_module_name[-2]))
else:
msg.append(py_module_name[-1])
raise AnsibleError(' '.join(msg))
# Found a byte compiled file rather than source. We cannot send byte
# compiled over the wire as the python version might be different.
# imp.find_module seems to prefer to return source packages so we just
# error out if imp.find_module returns byte compiled files (This is
# fragile as it depends on undocumented imp.find_module behaviour)
if module_info[2][2] not in (imp.PY_SOURCE, imp.PKG_DIRECTORY):
msg = ['Could not find python source for imported module support code for %s. Looked for' % name]
if idx == 2:
msg.append('either %s.py or %s.py' % (py_module_name[-1], py_module_name[-2]))
else:
msg.append(py_module_name[-1])
raise AnsibleError(' '.join(msg))
if idx == 2:
# We've determined that the last portion was an identifier and
# thus, not part of the module name
py_module_name = py_module_name[:-1]
# If not already processed then we've got work to do
# If not in the cache, then read the file into the cache
# We already have a file handle for the module open so it makes
# sense to read it now
if py_module_name not in py_module_cache:
if module_info[2][2] == imp.PKG_DIRECTORY:
# Read the __init__.py instead of the module file as this is
# a python package
normalized_name = py_module_name + ('__init__',)
if normalized_name not in py_module_names:
                    normalized_path = os.path.join(module_info[1], '__init__.py')
normalized_data = _slurp(normalized_path)
py_module_cache[normalized_name] = (normalized_data, normalized_path)
normalized_modules.add(normalized_name)
else:
normalized_name = py_module_name
if normalized_name not in py_module_names:
normalized_path = module_info[1]
normalized_data = module_info[0].read()
module_info[0].close()
py_module_cache[normalized_name] = (normalized_data, normalized_path)
normalized_modules.add(normalized_name)
# Make sure that all the packages that this module is a part of
# are also added
for i in range(1, len(py_module_name)):
py_pkg_name = py_module_name[:-i] + ('__init__',)
if py_pkg_name not in py_module_names:
pkg_dir_info = imp.find_module(py_pkg_name[-1],
[os.path.join(p, *py_pkg_name[:-1]) for p in module_utils_paths])
normalized_modules.add(py_pkg_name)
py_module_cache[py_pkg_name] = (_slurp(pkg_dir_info[1]), pkg_dir_info[1])
# FIXME: Currently the AnsiBallZ wrapper monkeypatches module args into a global
# variable in basic.py. If a module doesn't import basic.py, then the AnsiBallZ wrapper will
    # traceback when it tries to monkeypatch. So, for now, we have to unconditionally include
# basic.py.
#
# In the future we need to change the wrapper to monkeypatch the args into a global variable in
# their own, separate python module. That way we won't require basic.py. Modules which don't
# want basic.py can import that instead. AnsibleModule will need to change to import the vars
# from the separate python module and mirror the args into its global variable for backwards
# compatibility.
if ('basic',) not in py_module_names:
pkg_dir_info = imp.find_module('basic', module_utils_paths)
normalized_modules.add(('basic',))
py_module_cache[('basic',)] = (_slurp(pkg_dir_info[1]), pkg_dir_info[1])
# End of AnsiballZ hack
#
# iterate through all of the ansible.module_utils* imports that we haven't
# already checked for new imports
#
# set of modules that we haven't added to the zipfile
unprocessed_py_module_names = normalized_modules.difference(py_module_names)
for py_module_name in unprocessed_py_module_names:
py_module_path = os.path.join(*py_module_name)
py_module_file_name = '%s.py' % py_module_path
zf.writestr(os.path.join("ansible/module_utils",
py_module_file_name), py_module_cache[py_module_name][0])
display.vvvvv("Using module_utils file %s" % py_module_cache[py_module_name][1])
# Add the names of the files we're scheduling to examine in the loop to
# py_module_names so that we don't re-examine them in the next pass
# through recursive_finder()
py_module_names.update(unprocessed_py_module_names)
for py_module_file in unprocessed_py_module_names:
recursive_finder(py_module_file, py_module_cache[py_module_file][0], py_module_names, py_module_cache, zf)
# Save memory; the file won't have to be read again for this ansible module.
del py_module_cache[py_module_file]
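# --- Illustrative sketch (not part of the original file) ---------------------
# recursive_finder() relies on ast to discover "ansible.module_utils" imports.
# The helper below is a simplified, self-contained rendition of that first step
# (the real code uses the ModuleDepFinder NodeVisitor defined earlier in this
# file); it only lists the dotted module names and does not resolve or zip them.
def _example_list_module_utils_imports(source):
    found = set()
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.ImportFrom) and node.module and \
                node.module.startswith('ansible.module_utils'):
            found.add(node.module)
    return found

# _example_list_module_utils_imports("from ansible.module_utils.basic import AnsibleModule")
# -> set(['ansible.module_utils.basic'])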
def _is_binary(b_module_data):
textchars = bytearray(set([7, 8, 9, 10, 12, 13, 27]) | set(range(0x20, 0x100)) - set([0x7f]))
start = b_module_data[:1024]
return bool(start.translate(None, textchars))
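# --- Illustrative sketch (not part of the original file) ---------------------
# _is_binary() strips every byte that looks like text with bytes.translate();
# anything left over must be a control byte, so a non-empty remainder marks the
# data as binary:
#
#   _is_binary(b'#!/usr/bin/python\nprint("hi")\n')  # -> False (plain text)
#   _is_binary(b'\x7fELF\x02\x01\x01\x00')           # -> True  (ELF header bytes)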
def _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression, async_timeout, become,
become_method, become_user, become_password, become_flags, environment):
"""
Given the source of the module, convert it to a Jinja2 template to insert
module code and return whether it's a new or old style module.
"""
module_substyle = module_style = 'old'
# module_style is something important to calling code (ActionBase). It
# determines how arguments are formatted (json vs k=v) and whether
# a separate arguments file needs to be sent over the wire.
# module_substyle is extra information that's useful internally. It tells
    # us what we have to look for to substitute in the module files and whether
# we're using module replacer or ansiballz to format the module itself.
if _is_binary(b_module_data):
module_substyle = module_style = 'binary'
elif REPLACER in b_module_data:
        # Do REPLACER before the "from ansible.module_utils." check because we need
        # to make sure we substitute "from ansible.module_utils.basic import *" for REPLACER
module_style = 'new'
module_substyle = 'python'
b_module_data = b_module_data.replace(REPLACER, b'from ansible.module_utils.basic import *')
elif b'from ansible.module_utils.' in b_module_data:
module_style = 'new'
module_substyle = 'python'
elif REPLACER_WINDOWS in b_module_data:
module_style = 'new'
module_substyle = 'powershell'
b_module_data = b_module_data.replace(REPLACER_WINDOWS, b'#Requires -Module Ansible.ModuleUtils.Legacy')
elif re.search(b'#Requires -Module', b_module_data, re.IGNORECASE) \
or re.search(b'#Requires -Version', b_module_data, re.IGNORECASE)\
or re.search(b'#AnsibleRequires -OSVersion', b_module_data, re.IGNORECASE) \
or re.search(b'#AnsibleRequires -CSharpUtil', b_module_data, re.IGNORECASE):
module_style = 'new'
module_substyle = 'powershell'
elif REPLACER_JSONARGS in b_module_data:
module_style = 'new'
module_substyle = 'jsonargs'
elif b'WANT_JSON' in b_module_data:
module_substyle = module_style = 'non_native_want_json'
shebang = None
# Neither old-style, non_native_want_json nor binary modules should be modified
# except for the shebang line (Done by modify_module)
if module_style in ('old', 'non_native_want_json', 'binary'):
return b_module_data, module_style, shebang
output = BytesIO()
py_module_names = set()
if module_substyle == 'python':
params = dict(ANSIBLE_MODULE_ARGS=module_args,)
python_repred_params = repr(json.dumps(params))
try:
compression_method = getattr(zipfile, module_compression)
except AttributeError:
display.warning(u'Bad module compression string specified: %s. Using ZIP_STORED (no compression)' % module_compression)
compression_method = zipfile.ZIP_STORED
lookup_path = os.path.join(C.DEFAULT_LOCAL_TMP, 'ansiballz_cache')
cached_module_filename = os.path.join(lookup_path, "%s-%s" % (module_name, module_compression))
zipdata = None
# Optimization -- don't lock if the module has already been cached
if os.path.exists(cached_module_filename):
display.debug('ANSIBALLZ: using cached module: %s' % cached_module_filename)
with open(cached_module_filename, 'rb') as module_data:
zipdata = module_data.read()
else:
if module_name in action_write_locks.action_write_locks:
display.debug('ANSIBALLZ: Using lock for %s' % module_name)
lock = action_write_locks.action_write_locks[module_name]
else:
# If the action plugin directly invokes the module (instead of
# going through a strategy) then we don't have a cross-process
# Lock specifically for this module. Use the "unexpected
# module" lock instead
display.debug('ANSIBALLZ: Using generic lock for %s' % module_name)
lock = action_write_locks.action_write_locks[None]
display.debug('ANSIBALLZ: Acquiring lock')
with lock:
display.debug('ANSIBALLZ: Lock acquired: %s' % id(lock))
# Check that no other process has created this while we were
# waiting for the lock
if not os.path.exists(cached_module_filename):
display.debug('ANSIBALLZ: Creating module')
# Create the module zip data
zipoutput = BytesIO()
zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method)
# Note: If we need to import from release.py first,
# remember to catch all exceptions: https://github.com/ansible/ansible/issues/16523
zf.writestr('ansible/__init__.py',
b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n__version__="' +
to_bytes(__version__) + b'"\n__author__="' +
to_bytes(__author__) + b'"\n')
zf.writestr('ansible/module_utils/__init__.py', b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n')
zf.writestr('__main__.py', b_module_data)
py_module_cache = {('__init__',): (b'', '[builtin]')}
recursive_finder(module_name, b_module_data, py_module_names, py_module_cache, zf)
zf.close()
zipdata = base64.b64encode(zipoutput.getvalue())
# Write the assembled module to a temp file (write to temp
# so that no one looking for the file reads a partially
# written file)
if not os.path.exists(lookup_path):
# Note -- if we have a global function to setup, that would
# be a better place to run this
os.makedirs(lookup_path)
display.debug('ANSIBALLZ: Writing module')
with open(cached_module_filename + '-part', 'wb') as f:
f.write(zipdata)
# Rename the file into its final position in the cache so
# future users of this module can read it off the
# filesystem instead of constructing from scratch.
display.debug('ANSIBALLZ: Renaming module')
os.rename(cached_module_filename + '-part', cached_module_filename)
display.debug('ANSIBALLZ: Done creating module')
if zipdata is None:
display.debug('ANSIBALLZ: Reading module after lock')
# Another process wrote the file while we were waiting for
# the write lock. Go ahead and read the data from disk
# instead of re-creating it.
try:
zipdata = open(cached_module_filename, 'rb').read()
except IOError:
raise AnsibleError('A different worker process failed to create module file. '
'Look at traceback for that process for debugging information.')
zipdata = to_text(zipdata, errors='surrogate_or_strict')
shebang, interpreter = _get_shebang(u'/usr/bin/python', task_vars, templar)
if shebang is None:
shebang = u'#!/usr/bin/python'
# Enclose the parts of the interpreter in quotes because we're
# substituting it into the template as a Python string
interpreter_parts = interpreter.split(u' ')
interpreter = u"'{0}'".format(u"', '".join(interpreter_parts))
coverage_config = os.environ.get('_ANSIBLE_COVERAGE_CONFIG')
if coverage_config:
# Enable code coverage analysis of the module.
# This feature is for internal testing and may change without notice.
coverage = ANSIBALLZ_COVERAGE_TEMPLATE % dict(
coverage_config=coverage_config,
coverage_output=os.environ['_ANSIBLE_COVERAGE_OUTPUT']
)
else:
coverage = ''
now = datetime.datetime.utcnow()
output.write(to_bytes(ACTIVE_ANSIBALLZ_TEMPLATE % dict(
zipdata=zipdata,
ansible_module=module_name,
params=python_repred_params,
shebang=shebang,
interpreter=interpreter,
coding=ENCODING_STRING,
year=now.year,
month=now.month,
day=now.day,
hour=now.hour,
minute=now.minute,
second=now.second,
coverage=coverage,
)))
b_module_data = output.getvalue()
elif module_substyle == 'powershell':
# Powershell/winrm don't actually make use of shebang so we can
# safely set this here. If we let the fallback code handle this
# it can fail in the presence of the UTF8 BOM commonly added by
# Windows text editors
shebang = u'#!powershell'
# create the common exec wrapper payload and set that as the module_data
# bytes
b_module_data = ps_manifest._create_powershell_wrapper(
b_module_data, module_args, environment, async_timeout, become,
become_method, become_user, become_password, become_flags,
module_substyle
)
elif module_substyle == 'jsonargs':
module_args_json = to_bytes(json.dumps(module_args))
# these strings could be included in a third-party module but
# officially they were included in the 'basic' snippet for new-style
# python modules (which has been replaced with something else in
        # ansiballz). If we remove them from the jsonargs-style module replacer
# then we can remove them everywhere.
python_repred_args = to_bytes(repr(module_args_json))
b_module_data = b_module_data.replace(REPLACER_VERSION, to_bytes(repr(__version__)))
b_module_data = b_module_data.replace(REPLACER_COMPLEX, python_repred_args)
b_module_data = b_module_data.replace(REPLACER_SELINUX, to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS)))
# The main event -- substitute the JSON args string into the module
b_module_data = b_module_data.replace(REPLACER_JSONARGS, module_args_json)
facility = b'syslog.' + to_bytes(task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY), errors='surrogate_or_strict')
b_module_data = b_module_data.replace(b'syslog.LOG_USER', facility)
return (b_module_data, module_style, shebang)
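# --- Illustrative sketch (not part of the original file) ---------------------
# The 'python' branch above packs the module and its module_utils into an
# in-memory zip, base64-encodes it and substitutes it into the AnsiballZ
# wrapper template. The standalone helper below mimics just the packing step so
# the payload layout is easier to see (_example_pack_ansiballz_payload is a
# hypothetical name, not used by ansible itself).
def _example_pack_ansiballz_payload(b_module_data):
    buf = BytesIO()
    zf = zipfile.ZipFile(buf, mode='w', compression=zipfile.ZIP_STORED)
    zf.writestr('ansible/__init__.py', b'')                  # package marker
    zf.writestr('ansible/module_utils/__init__.py', b'')     # package marker
    zf.writestr('__main__.py', b_module_data)                 # the module runs as __main__
    zf.close()
    return base64.b64encode(buf.getvalue())                   # embedded into the wrapper script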
def modify_module(module_name, module_path, module_args, templar, task_vars=None, module_compression='ZIP_STORED', async_timeout=0, become=False,
become_method=None, become_user=None, become_password=None, become_flags=None, environment=None):
"""
Used to insert chunks of code into modules before transfer rather than
doing regular python imports. This allows for more efficient transfer in
a non-bootstrapping scenario by not moving extra files over the wire and
also takes care of embedding arguments in the transferred modules.
This version is done in such a way that local imports can still be
used in the module code, so IDEs don't have to be aware of what is going on.
Example:
from ansible.module_utils.basic import *
... will result in the insertion of basic.py into the module
from the module_utils/ directory in the source tree.
For powershell, this code effectively no-ops, as the exec wrapper requires access to a number of
properties not available here.
"""
task_vars = {} if task_vars is None else task_vars
environment = {} if environment is None else environment
with open(module_path, 'rb') as f:
# read in the module source
b_module_data = f.read()
(b_module_data, module_style, shebang) = _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression,
async_timeout=async_timeout, become=become, become_method=become_method,
become_user=become_user, become_password=become_password, become_flags=become_flags,
environment=environment)
if module_style == 'binary':
return (b_module_data, module_style, to_text(shebang, nonstring='passthru'))
elif shebang is None:
b_lines = b_module_data.split(b"\n", 1)
if b_lines[0].startswith(b"#!"):
b_shebang = b_lines[0].strip()
# shlex.split on python-2.6 needs bytes. On python-3.x it needs text
args = shlex.split(to_native(b_shebang[2:], errors='surrogate_or_strict'))
# _get_shebang() takes text strings
args = [to_text(a, errors='surrogate_or_strict') for a in args]
interpreter = args[0]
b_new_shebang = to_bytes(_get_shebang(interpreter, task_vars, templar, args[1:])[0],
errors='surrogate_or_strict', nonstring='passthru')
if b_new_shebang:
b_lines[0] = b_shebang = b_new_shebang
if os.path.basename(interpreter).startswith(u'python'):
b_lines.insert(1, b_ENCODING_STRING)
shebang = to_text(b_shebang, nonstring='passthru', errors='surrogate_or_strict')
else:
# No shebang, assume a binary module?
pass
b_module_data = b"\n".join(b_lines)
return (b_module_data, module_style, shebang)
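# --- Illustrative sketch (not part of the original file) ---------------------
# Typical call shape for modify_module() as used by the action plugins; every
# value below is a placeholder, not a real path or argument set:
#
#   (b_data, style, shebang) = modify_module(
#       'ping',                              # module name
#       '/path/to/ping.py',                  # resolved module path (placeholder)
#       {'data': 'pong'},                    # module args
#       templar,                             # ansible Templar instance
#       task_vars={'ansible_python_interpreter': '/usr/bin/python3'})
#   # style is one of 'old', 'new', 'non_native_want_json' or 'binary'; for
#   # new-style python modules b_data is the self-contained AnsiballZ script.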
| dlazz/ansible | lib/ansible/executor/module_common.py | Python | gpl-3.0 | 41,164 | ["VisIt"] | 09c6b457894c5f198e5ec99769e6908b1b09f5f6930ccf37d0ae3f60b916a590 |
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import sys
from six.moves import builtins
from ansible import constants as C
from ansible.plugins import filter_loader, test_loader
def safe_eval(expr, locals={}, include_exceptions=False):
'''
This is intended for allowing things like:
with_items: a_list_variable
Where Jinja2 would return a string but we do not want to allow it to
call functions (outside of Jinja2, where the env is constrained). If
the input data to this function came from an untrusted (remote) source,
it should first be run through _clean_data_struct() to ensure the data
is further sanitized prior to evaluation.
Based on:
http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
'''
# this is the whitelist of AST nodes we are going to
# allow in the evaluation. Any node type other than
# those listed here will raise an exception in our custom
# visitor class defined below.
SAFE_NODES = set(
(
ast.Add,
ast.BinOp,
ast.Call,
ast.Compare,
ast.Dict,
ast.Div,
ast.Expression,
ast.List,
ast.Load,
ast.Mult,
ast.Num,
ast.Name,
ast.Str,
ast.Sub,
ast.Tuple,
ast.UnaryOp,
)
)
# AST node types were expanded after 2.6
if not sys.version.startswith('2.6'):
        # set.union() returns a new set rather than updating in place, so the
        # result must be assigned back to SAFE_NODES
        SAFE_NODES = SAFE_NODES.union(
            set(
                (ast.Set,)
            )
        )
filter_list = []
for filter in filter_loader.all():
filter_list.extend(filter.filters().keys())
test_list = []
for test in test_loader.all():
test_list.extend(test.tests().keys())
CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list + test_list
class CleansingNodeVisitor(ast.NodeVisitor):
def generic_visit(self, node, inside_call=False):
if type(node) not in SAFE_NODES:
raise Exception("invalid expression (%s)" % expr)
elif isinstance(node, ast.Call):
inside_call = True
elif isinstance(node, ast.Name) and inside_call:
if hasattr(builtins, node.id) and node.id not in CALL_WHITELIST:
raise Exception("invalid function: %s" % node.id)
# iterate over all child nodes
for child_node in ast.iter_child_nodes(node):
self.generic_visit(child_node, inside_call)
if not isinstance(expr, basestring):
# already templated to a datastructure, perhaps?
if include_exceptions:
return (expr, None)
return expr
cnv = CleansingNodeVisitor()
try:
parsed_tree = ast.parse(expr, mode='eval')
cnv.visit(parsed_tree)
compiled = compile(parsed_tree, expr, 'eval')
result = eval(compiled, {}, dict(locals))
if include_exceptions:
return (result, None)
else:
return result
except SyntaxError as e:
# special handling for syntax errors, we just return
# the expression string back as-is
if include_exceptions:
return (expr, None)
return expr
except Exception as e:
if include_exceptions:
return (expr, e)
return expr
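# --- Illustrative sketch (not part of the original file) ---------------------
# Example behaviour of safe_eval(): literal-style expressions are evaluated,
# while anything outside the AST whitelist falls back to returning the original
# string (or the raised exception when include_exceptions=True). The helper
# name below is hypothetical.
def _example_safe_eval():
    assert safe_eval("[1, 2, 3]") == [1, 2, 3]
    assert safe_eval("{'a': 1 + 1}") == {'a': 2}
    # Attribute access and non-whitelisted builtins are rejected, so the
    # expression string itself comes back instead of being executed
    result, error = safe_eval("__import__('os').system('true')", include_exceptions=True)
    assert result == "__import__('os').system('true')" and error is not None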
| jaddison/ansible | lib/ansible/template/safe_eval.py | Python | gpl-3.0 | 4,160 | ["VisIt"] | 640c3a818fe68523ccbe794289f335a846b2995a8beeab2be74ba58b2faaa43c |
'''
WebdriverWrapper Module
This module wraps Selenium Webdriver, and provides a cleaner, more consistent interface. It also provides error
handling, and creates a more deterministic tool for web automation
'''
import httplib
import logging
import signal
import os
from urlparse import urlparse, urljoin
from selenium.common.exceptions import TimeoutException, NoAlertPresentException, UnexpectedAlertPresentException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from coyote_framework.webdriver.webdriverwrapper import WebElementWrapper
from coyote_framework.webdriver.webdriverwrapper.exceptions import WebDriverWrapperException, WebDriverTimeoutException, \
PageTimeoutException
from coyote_framework.webdriver.webdriverwrapper.support import LocatorHandler as LH, staticreader
from coyote_framework.webdriver.webdriverwrapper.support.locator import Locator
from coyote_framework.webdriver.webdriverwrapper.support import WebDriverWrapperAssertion as Assertion
from coyote_framework.webdriver.webdriverwrapper.support import JavascriptExecutor as JE
BROWSER_LOG_LEVEL_INFO = u'INFO'
BROWSER_LOG_LEVEL_DEBUG = u'DEBUG'
BROWSER_LOG_LEVEL_WARNING = u'WARNING'
BROWSER_LOG_LEVEL_SEVERE = u'SEVERE'
class quitting(object):
"""Context for webdriver to quit on exit
Usage:
>>> with quitting(webdriver.Firefox()) as driver:
>>> driver.get('http://google.com')
"""
def __init__(self, driver):
self.driver = driver
def __enter__(self):
"""@rtype: WebDriverWrapper"""
return self.driver
def __exit__(self, *args, **kwargs):
self.driver.quit()
class WebDriverWrapper(object):
"""
WebdriverWrapper Module
This module wraps Selenium Webdriver, and provides a cleaner, more consistent interface. It also provides error
handling, and creates a more deterministic tool for web automation
"""
class Data(object):
"""Generic holder for data tied to the driver"""
pass
def __init__(self, driver, options=None, display=None, *args, **kwargs):
"""
@type driver: RemoteWebDriver
"""
logger = logging.getLogger(__name__)
if options is None:
options = {}
options.update(kwargs)
self.driver = driver
try:
self.driver_pid = driver.binary.process.pid
except AttributeError:
self.driver_pid = None
logger.debug('WebDriver server url is: {}'.format(self.get_server_url()))
logger.debug('WebDriver browser pid is: {}'.format(self.driver_pid))
self.display = display
self.display_pid = display.pid if display else None
        logger.debug('WebDriver display pid is: {}'.format(self.display_pid))
self.implicit_wait = options['implicit_wait'] if 'implicit_wait' in options else 1
self.timeout = options['timeout'] if 'timeout' in options else 45
self.user_wait_timeout = options['user_wait_timeout'] if 'user_wait_timeout' in options else 180
self.find_attempts = options['find_attempts'] if 'find_attempts' in options else 2
self.maximize_window = options['maximize_window'] if 'maximize_window' in options else False
self.page_load_timeout = options['page_load_timeout'] if 'page_load_timeout' in options else self.timeout
self.ignore_page_load_timeouts = options['ignore_page_load_timeouts'] if 'ignore_page_load_timeouts' in options else False
self.browser_logs = []
self.locator_handler = LH.LocatorHandler
self.js_executor = JE.JavascriptExecutor(self)
self.assertion = Assertion.WebDriverWrapperAssertion(self, self.timeout, self.implicit_wait)
self.action_callbacks = options.get('action_callbacks') or [] # Functions to call at the end of each action
self.paused = False
# configure driver based on settings
self.driver.implicitly_wait(self.implicit_wait)
self.driver.set_page_load_timeout(self.page_load_timeout)
if self.maximize_window is True:
self.driver.maximize_window()
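    # --- Illustrative sketch (not part of the original file) -----------------
    # Typical construction, assuming a local Firefox driver (any RemoteWebDriver
    # works). Every option shown is optional and falls back to the defaults
    # documented in __init__ above; the URL is a placeholder.
    #
    #   from selenium import webdriver
    #   with quitting(WebDriverWrapper(webdriver.Firefox(),
    #                                  options={'timeout': 30,
    #                                           'implicit_wait': 2,
    #                                           'maximize_window': True})) as driver:
    #       driver.visit('http://example.com')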
def __str__(self):
message = "<WebDriverWrapper: "
try:
message += "timeout: " + str(self.timeout) + ", "
message += "implicit_wait: " + str(self.implicit_wait) + ", "
message += "find_attempts: " + str(self.find_attempts) + " "
except Exception:
message += ' -- (properties omitted)'
finally:
message += ">"
return message
def __repr__(self):
"""
Return self as string for repr
"""
return self.__str__()
def wrap_driver(self, driver):
"""
@type driver webdriver
"""
self.driver = driver
def get_server_url(self):
"""Returns the url of the standalone server used for remote web connections urls
@return: Selenium server url
"""
return self.driver.command_executor._url
def get_port(self):
"""Gets the port of the command executor
@rtype: int
@return: Port
"""
return self.driver.command_executor.profile.port
def get_profile_dir(self):
"""The path of the command executor's profile dir
@return: Temp directory
"""
return self.driver.command_executor.profile.path
def get_profile_file(self):
"""The path of the command executor's profile
@return: File path
"""
return self.driver.command_executor.profile.userPrefs
#
# WebDriver Properties
#
def current_url(self):
"""
@return current URL
"""
return self.execute_and_handle_webdriver_exceptions(lambda *args, **kwargs: self.driver.current_url)
def name(self):
"""
@return name of driver
"""
return self.execute_and_handle_webdriver_exceptions(lambda *args, **kwargs: self.driver.name)
def page_source(self):
"""
@return Source of the current page
"""
return self.execute_and_handle_webdriver_exceptions(lambda *args, **kwargs: self.driver.page_source)
def title(self):
"""
@return <title> content of the current page
"""
return self.execute_and_handle_webdriver_exceptions(lambda *args, **kwargs: self.driver.title)
#
# WebDriver Navigation
#
def get(self, url):
"""
Alias for 'visit' method; use 'visit' please
url -- An absolute or relative url stored as a string
"""
return self.visit(url)
def visit(self, url=''):
"""
Driver gets the provided url in the browser, returns True if successful
url -- An absolute or relative url stored as a string
"""
def _visit(url):
if len(url) > 0 and url[0] == '/':
# url's first character is a forward slash; treat as relative path
path = url
full_url = self.driver.current_url
parsed_url = urlparse(full_url)
base_url = str(parsed_url.scheme) + '://' + str(parsed_url.netloc)
url = urljoin(base_url, path)
try:
return self.driver.get(url)
except TimeoutException:
if self.ignore_page_load_timeouts:
pass
else:
raise PageTimeoutException.PageTimeoutException(self, url)
return self.execute_and_handle_webdriver_exceptions(lambda: _visit(url))
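    # --- Illustrative sketch (not part of the original file) -----------------
    # How the relative-path branch in visit() resolves a URL, shown with
    # urlparse/urljoin directly (values are placeholders):
    #
    #   parsed = urlparse('http://example.com/accounts/login')
    #   base = str(parsed.scheme) + '://' + str(parsed.netloc)  # 'http://example.com'
    #   urljoin(base, '/cart')                                  # -> 'http://example.com/cart'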
def back(self):
"""
Navigate to the previous page
"""
return self.driver.back()
def close(self):
"""
Close the driver
"""
return self.driver.close()
def forward(self):
"""
Navigate forward
"""
return self.driver.forward()
def refresh(self):
"""
Refresh the current page
"""
return self.driver.refresh()
def switch_to_iframe(self, iframe):
"""
@type iframe: webdriverwrapper.WebElementWrapper
@param iframe: iframe to select
@return: driver w/ selected iframe
"""
return self.driver.switch_to_frame(iframe.element)
def is_alert_present(self):
"""Tests if an alert is present
@return: True if alert is present, False otherwise
"""
current_frame = None
try:
current_frame = self.driver.current_window_handle
a = self.driver.switch_to_alert()
a.text
except NoAlertPresentException:
# No alert
return False
except UnexpectedAlertPresentException:
# Alert exists
return True
finally:
if current_frame:
self.driver.switch_to_window(current_frame)
return True
def switch_to_alert(self):
"""
@return: javascript alert object
"""
# TODO: for the switch_to_xxxx methods, add a wait_for_xxxx and call that
# (alerts/windows may not be present for a split second)
return self.driver.switch_to_alert()
def switch_to_window(self, window_name):
"""
@param window_name: name of the window
@return: the new window handle
"""
return self.driver.switch_to_window(window_name)
def switch_to_default_content(self):
"""
@return: driver w/ default content
"""
return self.driver.switch_to_default_content()
def current_window_handle(self):
"""
@return: Current window handle
"""
return self.driver.current_window_handle
def window_handles(self):
"""
@return: all open window handles
"""
return self.driver.window_handles
#
# WebDriver Finds
#
def find(self, locator, find_all=False, search_object=None, force_find=False, exclude_invisible=False):
"""
Attempts to locate an element, trying the number of times specified by the driver wrapper;
Will throw a WebDriverWrapperException if no element is found
@type locator: webdriverwrapper.support.locator.Locator
@param locator: the locator or css string used to query the element
@type find_all: bool
@param find_all: set to True to locate all located elements as a list
        @type search_object: webdriverwrapper.WebElementWrapper
        @param search_object: A WebDriver or WebElement object to call find_element(s)_by_xxxxx
        @type force_find: bool
        @param force_find: If true will use javascript to find elements
        @type exclude_invisible: bool
        @param exclude_invisible: If true, elements that are not displayed are filtered out of the results
"""
search_object = self.driver if search_object is None else search_object
attempts = 0
while attempts < self.find_attempts + 1:
if bool(force_find):
js_locator = self.locator_handler.parse_locator(locator)
if js_locator.By != 'css selector':
raise ValueError(
'You must use a css locator in order to force find an element; this was "{}"'.format(
js_locator))
elements = self.js_executor.execute_template_and_return_result(
'getElementsTemplate.js', variables={'selector': js_locator.value})
else:
elements = self.locator_handler.find_by_locator(search_object, locator, True)
# Save original elements found before applying filters to the list
all_elements = elements
# Check for only visible elements
visible_elements = elements
if exclude_invisible:
visible_elements = [element for element in all_elements if element.is_displayed()]
elements = visible_elements
if len(elements) > 0:
if find_all is True:
# return list of wrapped elements
for index in range(len(elements)):
elements[index] = WebElementWrapper.WebElementWrapper(self, locator, elements[index],
search_object=search_object)
return elements
elif find_all is False:
# return first element
return WebElementWrapper.WebElementWrapper(self, locator, elements[0], search_object=search_object)
else:
if attempts >= self.find_attempts:
if find_all is True: # returns an empty list if finding all elements
return []
else: # raise exception if attempting to find one element
error_message = "Unable to find element after {0} attempts with locator: {1}".format(
attempts,
locator
)
# Check if filters limited the results
if exclude_invisible and len(visible_elements) == 0 and len(all_elements) > 0:
error_message = "Elements found using locator {}, but none were visible".format(locator)
raise WebDriverWrapperException.WebDriverWrapperException(self, error_message)
else:
attempts += 1
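    # --- Illustrative sketch (not part of the original file) -----------------
    # Typical find() calls; the locator values are placeholders:
    #
    #   button = driver.find(Locator('css', '#submit', 'submit button'))
    #   rows = driver.find(Locator('css', 'table tr', 'result rows'),
    #                      find_all=True, exclude_invisible=True)
    #
    # force_find=True bypasses the WebDriver lookup and queries the DOM with
    # javascript instead; note that it requires a css locator.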
def _find_immediately(self, locator, search_object=None):
'''
Attempts to immediately find elements on the page without waiting
@type locator: webdriverwrapper.support.locator.Locator
@param locator: Locator object describing
@type search_object: webdriverwrapper.WebElementWrapper
@param search_object: Optional WebElement to start search with. If null, search will be on self.driver
        @return: list of WebElementWrappers found without waiting (may be empty)
'''
search_object = self.driver if search_object is None else search_object
elements = self.locator_handler.find_by_locator(search_object, locator, True)
return [WebElementWrapper.WebElementWrapper(self, locator, element) for element in elements]
def find_all(self, locator, search_object=None, force_find=False):
'''
Find all elements matching locator
@type locator: webdriverwrapper.support.locator.Locator
@param locator: Locator object describing
@rtype: list[WebElementWrapper]
@return: list of WebElementWrappers
'''
return self.find(locator=locator, find_all=True, search_object=search_object, force_find=force_find)
def find_by_dynamic_locator(self, template_locator, variables, find_all=False, search_object=None):
'''
Find with dynamic locator
@type template_locator: webdriverwrapper.support.locator.Locator
@param template_locator: Template locator w/ formatting bits to insert
@type variables: dict
@param variables: Dictionary of variable substitutions
@type find_all: bool
@param find_all: True to find all elements immediately, False for find single element only
@type search_object: webdriverwrapper.WebElementWrapper
@param search_object: Optional WebElement to start search with.
If null, search will be on self.driver
@rtype: webdriverwrapper.WebElementWrapper or list()
        @return: Single WebElementWrapper if find_all is False,
list of WebElementWrappers if find_all is True
'''
template_variable_character = '%'
# raise an exception if user passed non-dictionary variables
if not isinstance(variables, dict):
raise TypeError('You must use a dictionary to populate locator variables')
# replace all variables that match the keys in 'variables' dict
locator = ""
for key in variables.keys():
locator = template_locator.replace(template_variable_character + key, variables[key])
return self.find(locator, find_all, search_object)
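    # --- Illustrative sketch (not part of the original file) -----------------
    # Template locators substitute "%key" placeholders from the variables dict
    # before searching; the selector below is a placeholder:
    #
    #   driver.find_by_dynamic_locator('tr[data-id="%id"]', {'id': '42'})
    #   # the template resolves to 'tr[data-id="42"]' and is passed to find()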
def find_all_by_dynamic_locator(self, template_locator, variables):
'''
Find with dynamic locator
@type template_locator: webdriverwrapper.support.locator.Locator
@param template_locator: Template locator w/ formatting bits to insert
@type variables: dict
@param variables: Dictionary of variable substitutions
@rtype: webdriverwrapper.WebElementWrapper or list()
        @return: list of WebElementWrappers matching the resolved locator
'''
return self.find_by_dynamic_locator(template_locator, variables, True)
def is_present(self, locator, search_object=None):
"""
Determines whether an element is present on the page, retrying once if unable to locate
@type locator: webdriverwrapper.support.locator.Locator
@param locator: the locator or css string used to query the element
@type search_object: webdriverwrapper.WebElementWrapper
@param search_object: Optional WebElement to start search with.
If null, search will be on self.driver
"""
all_elements = self._find_immediately(locator, search_object=search_object)
if all_elements is not None and len(all_elements) > 0:
return True
else:
return False
def is_present_no_wait(self, locator):
"""
Determines whether an element is present on the page with no wait
@type locator: webdriverwrapper.support.locator.Locator
@param locator: the locator or css string used to query the element
"""
# first attempt to locate the element
def execute():
'''
Generic function to execute wait
'''
            return True if len(self.locator_handler.find_by_locator(self.driver, locator, True)) > 0 else False
return self.execute_and_handle_webdriver_exceptions(
execute, timeout=0, locator=locator, failure_message='Error running webdriver.find_all.')
#
# WebDriver Waits
#
def wait_until(self, wait_function, failure_message=None, timeout=None):
"""
Base wait method: called by other wait functions to execute wait
@type wait_function: types.FunctionType
@param wait_function: Generic function to be executed
@type failure_message: str
@param failure_message: Message to fail with if exception is raised
@type timeout: int
@param timeout: timeout override
@rtype: webdriverwrapper.WebElementWrapper
@return: Returns the element found
"""
timeout = timeout if timeout is not None else self.timeout
failure_message = failure_message if failure_message is not None else \
'Timeout waiting for custom function to return True'
def wait():
'''
Wait function passed to executor
'''
return WebDriverWait(self, timeout).until(lambda dw: wait_function())
return self.execute_and_handle_webdriver_exceptions(wait, timeout, None, failure_message)
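    # --- Illustrative sketch (not part of the original file) -----------------
    # wait_until() accepts any zero-argument callable; for example, waiting for
    # a hypothetical loading spinner to disappear:
    #
    #   driver.wait_until(
    #       lambda: not driver.is_present(Locator('css', '.spinner', 'loading spinner')),
    #       failure_message='Spinner never went away',
    #       timeout=30)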
def wait_for(self, locator, timeout=None):
"""
Waits until an element can be found (alias for wait_until_present)
@type locator: webdriverwrapper.support.locator.Locator
@param locator: the locator or css string to search for the element
@type timeout: int
@param timeout: the maximum number of seconds the driver will wait before timing out
@rtype: webdriverwrapper.WebElementWrapper
@return: Returns the element found"""
return self.wait_until_present(locator, timeout)
def wait_until_present(self, locator, timeout=None, failure_message='Timeout waiting for element to be present'):
"""
Waits for an element to be present
@type locator: webdriverwrapper.support.locator.Locator
@param locator: the locator or css string to search for the element
@type timeout: int
@param timeout: the maximum number of seconds the driver will wait before timing out
@rtype: webdriverwrapper.WebElementWrapper
@return: Returns the element found
"""
timeout = timeout if timeout is not None else self.timeout
def wait():
'''
Wait function passed to executor
'''
element = WebDriverWait(self.driver, timeout).until(EC.presence_of_element_located(
(self.locator_handler.parse_locator(locator).By, self.locator_handler.parse_locator(locator).value)))
return WebElementWrapper.WebElementWrapper(self, locator, element)
return self.execute_and_handle_webdriver_exceptions(
wait, timeout, locator, failure_message=failure_message)
def wait_until_not_present(self, locator, timeout=None):
"""
Waits for an element to no longer be present
@type locator: webdriverwrapper.support.locator.Locator
@param locator: the locator or css string to search for the element
@type timeout: int
@param timeout: the maximum number of seconds the driver will wait before timing out
@rtype: webdriverwrapper.WebElementWrapper
@return: Returns the element found
"""
# TODO: rethink about neg case with is_present and waiting too long
timeout = timeout if timeout is not None else self.timeout
this = self # for passing WebDriverWrapperReference to WebDriverWait
def wait():
'''
            Wait function passed to executor
'''
return WebDriverWait(self.driver, timeout).until(lambda d: not this.is_present(locator))
return self.execute_and_handle_webdriver_exceptions(
wait, timeout, locator, 'Timeout waiting for element not to be present')
def wait_until_visibility_of(self, locator, timeout=None,
failure_message='Timeout waiting for element to be visible'):
"""
Waits for an element to be visible
@type locator: webdriverwrapper.support.locator.Locator
@param locator: the locator or css string to search for the element
@type timeout: int
@param timeout: the maximum number of seconds the driver will wait before timing out
@rtype: webdriverwrapper.WebElementWrapper
@return: Returns the element found
"""
timeout = timeout if timeout is not None else self.timeout
def wait():
'''
Wait function passed to executor
'''
element = WebDriverWait(self.driver, timeout).until(EC.visibility_of_element_located(
(self.locator_handler.parse_locator(locator).By, self.locator_handler.parse_locator(locator).value)))
return WebElementWrapper.WebElementWrapper(self, locator, element)
return self.execute_and_handle_webdriver_exceptions(
wait,
timeout,
locator,
failure_message
)
def wait_until_invisibility_of(self, locator, timeout=None):
"""
Waits for an element to be invisible
@type locator: webdriverwrapper.support.locator.Locator
@param locator: the locator or css string to search for the element
@type timeout: int
@param timeout: the maximum number of seconds the driver will wait before timing out
@rtype: webdriverwrapper.WebElementWrapper
@return: Returns the element found
"""
timeout = timeout if timeout is not None else self.timeout
def wait():
'''
Wait function passed to executor
'''
element = WebDriverWait(self.driver, timeout).until(EC.invisibility_of_element_located(
(self.locator_handler.parse_locator(locator).By, self.locator_handler.parse_locator(locator).value)))
return WebElementWrapper.WebElementWrapper(self, locator, element)
return self.execute_and_handle_webdriver_exceptions(
wait, timeout, locator, 'Timeout waiting for element to be invisible')
def wait_until_clickable(self, locator, timeout=None):
"""
Waits for an element to be clickable
@type locator: webdriverwrapper.support.locator.Locator
@param locator: the locator or css string to search for the element
@type timeout: int
@param timeout: the maximum number of seconds the driver will wait before timing out
@rtype: webdriverwrapper.WebElementWrapper
@return: Returns the element found
"""
timeout = timeout if timeout is not None else self.timeout
def wait():
'''
Wait function passed to executor
'''
element = WebDriverWait(self.driver, timeout).until(EC.element_to_be_clickable(
(self.locator_handler.parse_locator(locator).By, self.locator_handler.parse_locator(locator).value)))
return WebElementWrapper.WebElementWrapper(self, locator, element)
return self.execute_and_handle_webdriver_exceptions(
wait, timeout, locator, 'Timeout waiting for element to be clickable')
def wait_until_stale(self, locator, timeout=None):
"""
Waits for an element to be stale in the DOM
@type locator: webdriverwrapper.support.locator.Locator
@param locator: the locator or css string to search for the element
@type timeout: int
@param timeout: the maximum number of seconds the driver will wait before timing out
@rtype: webdriverwrapper.WebElementWrapper
@return: Returns the element found
"""
timeout = timeout if timeout is not None else self.timeout
def wait():
'''
Wait function passed to executor
'''
element = WebDriverWait(self.driver, timeout).until(EC.staleness_of(
(self.locator_handler.parse_locator(locator).By, self.locator_handler.parse_locator(locator).value)))
return WebElementWrapper.WebElementWrapper(self, locator, element)
return self.execute_and_handle_webdriver_exceptions(
wait, timeout, locator, 'Timeout waiting for element to become stale')
# TODO: more precise exception for non-element timeouts
def wait_until_title_contains(self, partial_title, timeout=None):
"""
Waits for title to contain <partial_title>
@type partial_title: str
@param partial_title: the partial title to locate
@type timeout: int
@param timeout: the maximum number of seconds the driver will wait before timing out
@rtype: webdriverwrapper.WebElementWrapper
@return: Returns the element found
"""
timeout = timeout if timeout is not None else self.timeout
def wait():
'''
Wait function passed to executor
'''
return WebDriverWait(self.driver, timeout).until(EC.title_contains(partial_title))
return self.execute_and_handle_webdriver_exceptions(
wait, timeout, partial_title, 'Timeout waiting for title to contain: ' + str(partial_title))
def wait_until_title_is(self, title, timeout=None):
"""
        Waits for title to be exactly <title>
@type title: str
@param title: the exact title to locate
@type timeout: int
@param timeout: the maximum number of seconds the driver will wait before timing out
@rtype: webdriverwrapper.WebElementWrapper
@return: Returns the element found
"""
timeout = timeout if timeout is not None else self.timeout
def wait():
'''
Wait function passed to executor
'''
return WebDriverWait(self.driver, timeout).until(EC.title_is(title))
return self.execute_and_handle_webdriver_exceptions(
wait, timeout, title, 'Timeout waiting for title to be: ' + str(title))
def wait_until_alert_is_present(self, timeout=None):
"""
Waits for an alert to be present
@type timeout: int
@param timeout: the maximum number of seconds the driver will wait before timing out
@rtype: webdriverwrapper.WebElementWrapper
@return: Returns the element found
"""
timeout = timeout if timeout is not None else self.timeout
locator = None
def wait():
'''
Wait function passed to executor
'''
return WebDriverWait(self.driver, timeout).until(EC.alert_is_present())
return self.execute_and_handle_webdriver_exceptions(
wait, timeout, locator, 'Timeout waiting for alert to be present')
def wait_until_text_contains(self, locator, text, timeout=None):
"""
Waits for an element's text to contain <text>
@type locator: webdriverwrapper.support.locator.Locator
@param locator: locator used to find element
@type text: str
@param text: the text to search for
@type timeout: int
@param timeout: the maximum number of seconds the driver will wait before timing out
@rtype: webdriverwrapper.WebElementWrapper
@return: Returns the element found
"""
timeout = timeout if timeout is not None else self.timeout
this = self
self.wait_for(locator) # first check that element exists
def wait():
'''
Wait function passed to executor
'''
WebDriverWait(self.driver, timeout).until(lambda d: text in this.find(locator).text())
return this.find(locator)
return self.execute_and_handle_webdriver_exceptions(
wait, timeout, locator, 'Timeout waiting for text to contain: ' + str(text))
def wait_until_text_is(self, locator, text, timeout=None):
"""
Waits for an element's text to exactly match <text>
@type locator: webdriverwrapper.support.locator.Locator
@param locator: locator used to find element
@type text: str
@param text: the text to search for
@type timeout: int
@param timeout: the maximum number of seconds the driver will wait before timing out
@rtype: webdriverwrapper.WebElementWrapper
@return: Returns the element found
"""
timeout = timeout if timeout is not None else self.timeout
this = self
self.wait_for(locator) # first check that element exists
def wait():
'''
Wait function passed to executor
'''
WebDriverWait(self.driver, timeout).until(lambda d: text == this.find(locator).text())
return this.find(locator)
return self.execute_and_handle_webdriver_exceptions(
wait, timeout, locator, 'Timeout waiting for text to be: ' + str(text))
def wait_until_text_is_not_empty(self, locator, timeout=None):
"""
Waits for an element's text to not be empty
@type locator: webdriverwrapper.support.locator.Locator
@param locator: locator used to find element
@type timeout: int
@param timeout: the maximum number of seconds the driver will wait before timing out
@rtype: webdriverwrapper.WebElementWrapper
@return: Returns the element found
"""
timeout = timeout if timeout is not None else self.timeout
self.wait_for(locator) # first check that element exists
def wait():
'''
Wait function passed to executor
'''
WebDriverWait(self.driver, timeout).until(lambda d: len(self.find(locator).text()) > 0)
return self.find(locator)
return self.execute_and_handle_webdriver_exceptions(
wait, timeout, locator, 'Timeout waiting for element to contain some text')
def wait_until_page_source_contains(self, text, timeout=None):
"""
Waits for the page source to contain <text>
@type text: str
@param text: the text to search for
@type timeout: int
@param timeout: the maximum number of seconds the driver will wait before timing out
@rtype: webdriverwrapper.WebElementWrapper
@return: Returns the element found
"""
timeout = timeout if timeout is not None else self.timeout
def wait():
'''
Wait function passed to executor
'''
WebDriverWait(self.driver, timeout).until(lambda d: text in self.page_source())
return self.page_source()
return self.execute_and_handle_webdriver_exceptions(
wait, timeout, text, 'Timeout waiting for source to contain: {}'.format(text))
def wait_until_jquery_requests_are_closed(self, timeout=None):
"""Waits for AJAX requests made through
@type timeout: int
@param timeout: the maximum number of seconds the driver will wait before timing out
@return: None
"""
timeout = timeout if timeout is not None else self.timeout
def wait():
'''
Wait function passed to executor
'''
WebDriverWait(self.driver, timeout).until(
lambda d: self.js_executor.execute_template('isJqueryAjaxComplete', {}))
return True
return self.execute_and_handle_webdriver_exceptions(
wait, timeout, None, 'Timeout waiting for all jQuery AJAX requests to close')
def execute_and_handle_webdriver_exceptions(self, function_to_execute, timeout=None, locator=None, failure_message=None):
"""
Executor for wait functions
@type function_to_execute: types.FunctionType
@param function_to_execute: wait function specifying the type of wait
@type timeout: int
@param timeout: the maximum number of seconds the driver will wait before timing out
@type locator: webdriverwrapper.support.locator.Locator
@param locator: locator used to find element
@type failure_message: str
@param failure_message: message shown in exception if wait fails
@rtype: webdriverwrapper.WebElementWrapper
@return: Returns the element found
"""
logger = logging.getLogger(__name__)
try:
val = function_to_execute()
for cb in self.action_callbacks:
cb.__call__(self)
return val
except TimeoutException:
raise WebDriverTimeoutException.WebDriverTimeoutException(self, timeout, locator, failure_message)
except httplib.BadStatusLine, e:
logger.error('BadStatusLine error raised on WebDriver action (line: {}, args:{}, message: {})'.format(
e.line,
e.args,
e.message
))
raise
except httplib.CannotSendRequest:
logger.error('CannotSendRequest error raised on WebDriver action')
raise
except UnexpectedAlertPresentException:
# NOTE: handling alerts in this way expects that WebDriver does not dismiss unexpected alerts. That
# setting can be changed by modifying the unexpectedAlertBehaviour setting
msg = '<failed to parse message from alert>'
try:
a = self.driver.switch_to_alert()
msg = a.text
except Exception, e:
msg = '<error parsing alert due to {} (note: parsing ' \
'alert text expects "unexpectedAlertBehaviour" to be set to "ignore")>'.format(e)
logger.critical(msg)
finally:
logger.error('Unexpected alert raised on a WebDriver action; alert message was: {}'.format(msg))
raise UnexpectedAlertPresentException('Unexpected alert on page, alert message was: "{}"'.format(msg))
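    # --- Illustrative sketch (not part of the original file) -----------------
    # Every successful action funnels through this executor and then calls the
    # registered action_callbacks with the wrapper instance. A hypothetical
    # callback that saves a screenshot after each action could be wired up as:
    #
    #   def screenshot_after_action(drv):
    #       with open('/tmp/last_action.png', 'wb') as fh:  # placeholder path
    #           fh.write(drv.get_screenshot_as_png())
    #
    #   driver = WebDriverWrapper(webdriver.Firefox(),
    #                             options={'action_callbacks': [screenshot_after_action]})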
#
# Browser Interaction
#
def get_screenshot_as_png(self):
"""Gets the screenshot of the page as binary data
@return: The binary data of the screenshot
"""
return self.driver.get_screenshot_as_png()
def execute_script(self, script, args=None):
"""
JavaScript executor
@type script: str
@param script: javascript to execute
@type args: dict
@param args: dict of args
@rtype: webdriverwrapper.WebElementWrapper
@return: Returns the element found
"""
return self.js_executor.execute_script(script, args)
def pause_and_wait_for_user(self, timeout=None, prompt_text='Click to resume (WebDriver is paused)'):
"""Injects a radio button into the page and waits for the user to click it; will raise an exception if the
radio to resume is never checked
@return: None
"""
timeout = timeout if timeout is not None else self.user_wait_timeout
# Set the browser state paused
self.paused = True
def check_user_ready(driver):
"""Polls for the user to be "ready" (meaning they checked the checkbox) and the driver to be unpaused.
If the checkbox is not displayed (e.g. user navigates the page), it will re-insert it into the page
@type driver: WebDriverWrapper
@param driver: Driver to execute
@return: True if user is ready, false if not
"""
if driver.paused:
if driver.is_user_ready():
# User indicated they are ready; free the browser lock
driver.paused = False
return True
else:
if not driver.is_present(Locator('css', '#webdriver-resume-radio', 'radio to unpause webdriver')):
# Display the prompt
pause_html = staticreader.read_html_file('webdriverpaused.html')\
.replace('\n', '')\
.replace('PROMPT_TEXT', prompt_text)
webdriver_style = staticreader.read_css_file('webdriverstyle.css').replace('\n', '')
# Insert the webdriver style
driver.js_executor.execute_template_and_return_result(
'injectCssTemplate.js',
{'css': webdriver_style})
# Insert the paused html
driver.js_executor.execute_template_and_return_result(
'injectHtmlTemplate.js',
{'selector': 'body', 'html': pause_html})
return False
self.wait_until(
lambda: check_user_ready(self),
timeout=timeout,
failure_message='Webdriver actions were paused but did not receive the command to continue. '
'You must click the on-screen message to resume.'
)
# Remove all injected elements
self.js_executor.execute_template_and_return_result(
'deleteElementsTemplate.js',
{'selector': '.webdriver-injected'}
)
def is_process_running(self):
"""Checks if the driver process is running
@return: True if PID present
"""
try:
os.kill(self.driver_pid, 0)
except OSError:
return False
else:
return True
def is_user_ready(self):
"""Checks if the radio button indicating webdriver is paused is present and unchecked
@rtype: bool
@return: True if the paused radio is present on the page and unchecked; false otherwise
"""
user_ready = not self.js_executor.execute_template_and_return_result('isWaitingForUser.js', {})
return user_ready
def add_cookie(self, cookie_dict):
"""Adds cookie with dictionary, requires keys "name" and "value" and takes optional keys: "path," "domain,"
"secure", and "expiry"
@type cookie_dict: dict
@param cookie_dict: dictionary of cookies. requires keys "name" and "value" and takes optional keys:
"path," "domain," "secure", and "expiry"
"""
return self.driver.add_cookie(cookie_dict)
def delete_all_cookies(self):
"""
Delete all cookies from current session
"""
return self.driver.delete_all_cookies()
def delete_cookie(self, name):
"""
Delete specific cookie from current session
@type name: str
@params name: name of cookie to delete
"""
return self.driver.delete_cookie(name)
def get_cookie(self, name):
"""
Retrieve specific cookie from current session
@type name: str
@params name: name of cookie to retrieve
"""
return self.driver.get_cookie(name)
def get_cookies(self):
"""
Retrieve all cookies
"""
return self.driver.get_cookies()
def get_browser_log(self, levels=None):
"""Gets the console log of the browser
@type levels:
@return: List of browser log entries
"""
logs = self.driver.get_log('browser')
self.browser_logs += logs
if levels is not None:
logs = [entry for entry in logs if entry.get(u'level') in levels]
return logs
def get_window_size(self):
"""Gets the window size
@return: Window size
"""
return self.driver.get_window_size()
def set_window_size(self, width, height):
"""
Sets the window width and height of the browser
@return: None
"""
self.driver.set_window_size(width, height)
def quit(self):
"""Close driver and kill all associated displays
"""
# Kill the driver
def _quit():
try:
self.driver.quit()
except Exception, err_driver:
os.kill(self.driver_pid, signal.SIGKILL)
raise
finally:
# Kill the display for this driver window
try:
if self.display:
self.display.stop()
except Exception, err_display:
os.kill(self.display_pid, signal.SIGKILL)
raise
return self.execute_and_handle_webdriver_exceptions(_quit)
| Shapeways/coyote_framework | coyote_framework/webdriver/webdriverwrapper/WebDriverWrapper.py | Python | mit | 44,327 | ["VisIt"] | af28a6400aa28d5105d341cd113bffbee2a9e7a0e7db486344bcd14f36cd22dc |
from zope.interface import Interface
from foolscap.api import StringConstraint, ListOf, TupleOf, SetOf, DictOf, \
ChoiceOf, IntegerConstraint, Any, RemoteInterface, Referenceable
HASH_SIZE=32
SALT_SIZE=16
SDMF_VERSION=0
MDMF_VERSION=1
Hash = StringConstraint(maxLength=HASH_SIZE,
minLength=HASH_SIZE)# binary format 32-byte SHA256 hash
Nodeid = StringConstraint(maxLength=20,
minLength=20) # binary format 20-byte SHA1 hash
FURL = StringConstraint(1000)
StorageIndex = StringConstraint(16)
URI = StringConstraint(300) # kind of arbitrary
MAX_BUCKETS = 256 # per peer -- zfec offers at most 256 shares per file
DEFAULT_MAX_SEGMENT_SIZE = 128*1024
ShareData = StringConstraint(None)
URIExtensionData = StringConstraint(1000)
Number = IntegerConstraint(8) # 2**(8*8) == 16EiB ~= 18e18 ~= 18 exabytes
Offset = Number
ReadSize = int # the 'int' constraint is 2**31 == 2Gib -- large files are processed in not-so-large increments
WriteEnablerSecret = Hash # used to protect mutable share modifications
LeaseRenewSecret = Hash # used to protect lease renewal requests
LeaseCancelSecret = Hash # was used to protect lease cancellation requests
class RIBucketWriter(RemoteInterface):
""" Objects of this kind live on the server side. """
def write(offset=Offset, data=ShareData):
return None
def close():
"""
If the data that has been written is incomplete or inconsistent then
the server will throw the data away, else it will store it for future
retrieval.
"""
return None
def abort():
"""Abandon all the data that has been written.
"""
return None
class RIBucketReader(RemoteInterface):
def read(offset=Offset, length=ReadSize):
return ShareData
def advise_corrupt_share(reason=str):
"""Clients who discover hash failures in shares that they have
downloaded from me will use this method to inform me about the
failures. I will record their concern so that my operator can
manually inspect the shares in question. I return None.
This is a wrapper around RIStorageServer.advise_corrupt_share()
that is tied to a specific share, and therefore does not need the
extra share-identifying arguments. Please see that method for full
documentation.
"""
TestVector = ListOf(TupleOf(Offset, ReadSize, str, str))
# elements are (offset, length, operator, specimen)
# operator is one of "lt, le, eq, ne, ge, gt"
# nop always passes and is used to fetch data while writing.
# you should use length==len(specimen) for everything except nop
DataVector = ListOf(TupleOf(Offset, ShareData))
# (offset, data). This limits us to 30 writes of 1MiB each per call
TestAndWriteVectorsForShares = DictOf(int,
TupleOf(TestVector,
DataVector,
ChoiceOf(None, Offset), # new_length
))
ReadVector = ListOf(TupleOf(Offset, ReadSize))
ReadData = ListOf(ShareData)
# returns data[offset:offset+length] for each element of TestVector
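# --- Illustrative sketch (not part of the original file) ---------------------
# Example of the wire shapes defined above. A TestAndWriteVectorsForShares dict
# that overwrites share 0 with "new data" only if its first 8 bytes currently
# equal "old data", plus a read vector that fetches those bytes in the same call
# (purely illustrative values):
_example_tw_vectors = {
    0: (
        [(0, 8, "eq", "old data")],  # TestVector: (offset, length, operator, specimen)
        [(0, "new data")],           # DataVector: (offset, data)
        None,                        # new_length: None leaves the share length alone
    ),
}
_example_read_vector = [(0, 8)]      # ReadVector entries are (offset, length)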
class RIStorageServer(RemoteInterface):
__remote_name__ = "RIStorageServer.tahoe.allmydata.com"
def get_version():
"""
Return a dictionary of version information.
"""
return DictOf(str, Any())
def allocate_buckets(storage_index=StorageIndex,
renew_secret=LeaseRenewSecret,
cancel_secret=LeaseCancelSecret,
sharenums=SetOf(int, maxLength=MAX_BUCKETS),
allocated_size=Offset, canary=Referenceable):
"""
@param storage_index: the index of the bucket to be created or
increfed.
@param sharenums: these are the share numbers (probably between 0 and
99) that the sender is proposing to store on this
server.
@param renew_secret: This is the secret used to protect bucket refresh
This secret is generated by the client and
stored for later comparison by the server. Each
server is given a different secret.
@param cancel_secret: This no longer allows lease cancellation, but
must still be a unique value identifying the
lease. XXX stop relying on it to be unique.
@param canary: If the canary is lost before close(), the bucket is
deleted.
@return: tuple of (alreadygot, allocated), where alreadygot is what we
already have and allocated is what we hereby agree to accept.
New leases are added for shares in both lists.
"""
return TupleOf(SetOf(int, maxLength=MAX_BUCKETS),
DictOf(int, RIBucketWriter, maxKeys=MAX_BUCKETS))
def add_lease(storage_index=StorageIndex,
renew_secret=LeaseRenewSecret,
cancel_secret=LeaseCancelSecret):
"""
Add a new lease on the given bucket. If the renew_secret matches an
existing lease, that lease will be renewed instead. If there is no
bucket for the given storage_index, return silently. (note that in
tahoe-1.3.0 and earlier, IndexError was raised if there was no
bucket)
"""
return Any() # returns None now, but future versions might change
def renew_lease(storage_index=StorageIndex, renew_secret=LeaseRenewSecret):
"""
Renew the lease on a given bucket, resetting the timer to 31 days.
Some networks will use this, some will not. If there is no bucket for
the given storage_index, IndexError will be raised.
For mutable shares, if the given renew_secret does not match an
existing lease, IndexError will be raised with a note listing the
server-nodeids on the existing leases, so leases on migrated shares
can be renewed. For immutable shares, IndexError (without the note)
will be raised.
"""
return Any()
def get_buckets(storage_index=StorageIndex):
return DictOf(int, RIBucketReader, maxKeys=MAX_BUCKETS)
def slot_readv(storage_index=StorageIndex,
shares=ListOf(int), readv=ReadVector):
"""Read a vector from the numbered shares associated with the given
storage index. An empty shares list means to return data from all
known shares. Returns a dictionary with one key per share."""
return DictOf(int, ReadData) # shnum -> results
def slot_testv_and_readv_and_writev(storage_index=StorageIndex,
secrets=TupleOf(WriteEnablerSecret,
LeaseRenewSecret,
LeaseCancelSecret),
tw_vectors=TestAndWriteVectorsForShares,
r_vector=ReadVector,
):
"""
General-purpose test-read-and-set operation for mutable slots:
(1) For submitted shnums, compare the test vectors against extant
shares, or against an empty share for shnums that do not exist.
(2) Use the read vectors to extract "old data" from extant shares.
(3) If all tests in (1) passed, then apply the write vectors
(possibly creating new shares).
(4) Return whether the tests passed, and the "old data", which does
not include any modifications made by the writes.
The operation does not interleave with other operations on the same
shareset.
This method is, um, large. The goal is to allow clients to update all
the shares associated with a mutable file in a single round trip.
@param storage_index: the index of the bucket to be created or
increfed.
@param write_enabler: a secret that is stored along with the slot.
Writes are accepted from any caller who can
present the matching secret. A different secret
should be used for each slot*server pair.
        @param renew_secret: This is the secret used to protect bucket refresh requests.
This secret is generated by the client and
stored for later comparison by the server. Each
server is given a different secret.
@param cancel_secret: This no longer allows lease cancellation, but
must still be a unique value identifying the
lease. XXX stop relying on it to be unique.
The 'secrets' argument is a tuple of (write_enabler, renew_secret,
cancel_secret). The first is required to perform any write. The
latter two are used when allocating new shares. To simply acquire a
new lease on existing shares, use an empty testv and an empty writev.
Each share can have a separate test vector (i.e. a list of
comparisons to perform). If all vectors for all shares pass, then all
writes for all shares are recorded. Each comparison is a 4-tuple of
(offset, length, operator, specimen), which effectively does a
bool( (read(offset, length)) OPERATOR specimen ) and only performs
the write if all these evaluate to True. Basic test-and-set uses 'eq'.
Write-if-newer uses a seqnum and (offset, length, 'lt', specimen).
Write-if-same-or-newer uses 'le'.
Reads from the end of the container are truncated, and missing shares
behave like empty ones, so to assert that a share doesn't exist (for
use when creating a new share), use (0, 1, 'eq', '').
The write vector will be applied to the given share, expanding it if
necessary. A write vector applied to a share number that did not
exist previously will cause that share to be created. Write vectors
must not overlap (if they do, this will either cause an error or
apply them in an unspecified order). Duplicate write vectors, with
the same offset and data, are currently tolerated but are not
desirable.
In Tahoe-LAFS v1.8.3 or later (except 1.9.0a1), if you send a write
vector whose offset is beyond the end of the current data, the space
between the end of the current data and the beginning of the write
vector will be filled with zero bytes. In earlier versions the
contents of this space was unspecified (and might end up containing
secrets). Storage servers with the new zero-filling behavior will
advertise a true value for the 'fills-holes-with-zero-bytes' key
(under 'http://allmydata.org/tahoe/protocols/storage/v1') in their
version information.
Each write vector is accompanied by a 'new_length' argument, which
can be used to truncate the data. If new_length is not None and it is
less than the current size of the data (after applying all write
vectors), then the data will be truncated to new_length. If
new_length==0, the share will be deleted.
In Tahoe-LAFS v1.8.2 and earlier, new_length could also be used to
enlarge the file by sending a number larger than the size of the data
after applying all write vectors. That behavior was not used, and as
of Tahoe-LAFS v1.8.3 it no longer works and the new_length is ignored
in that case.
If a storage client knows that the server supports zero-filling, for
example from the 'fills-holes-with-zero-bytes' key in its version
information, it can extend the file efficiently by writing a single
zero byte just before the new end-of-file. Otherwise it must
explicitly write zeroes to all bytes between the old and new
end-of-file. In any case it should avoid sending new_length larger
than the size of the data after applying all write vectors.
The read vector is used to extract data from all known shares,
*before* any writes have been applied. The same read vector is used
for all shares. This captures the state that was tested by the test
vector, for extant shares.
This method returns two values: a boolean and a dict. The boolean is
True if the write vectors were applied, False if not. The dict is
keyed by share number, and each value contains a list of strings, one
for each element of the read vector.
If the write_enabler is wrong, this will raise BadWriteEnablerError.
To enable share migration (using update_write_enabler), the exception
will have the nodeid used for the old write enabler embedded in it,
in the following string::
The write enabler was recorded by nodeid '%s'.
Note that the nodeid here is encoded using the same base32 encoding
used by Foolscap and allmydata.util.idlib.nodeid_b2a().
"""
return TupleOf(bool, DictOf(int, ReadData))
def advise_corrupt_share(share_type=str, storage_index=StorageIndex,
shnum=int, reason=str):
"""Clients who discover hash failures in shares that they have
downloaded from me will use this method to inform me about the
failures. I will record their concern so that my operator can
manually inspect the shares in question. I return None.
'share_type' is either 'mutable' or 'immutable'. 'storage_index' is a
(binary) storage index string, and 'shnum' is the integer share
number. 'reason' is a human-readable explanation of the problem,
probably including some expected hash values and the computed ones
that did not match. Corruption advisories for mutable shares should
include a hash of the public key (the same value that appears in the
mutable-file verify-cap), since the current share format does not
store that on disk.
"""
class IStorageBucketWriter(Interface):
"""
Objects of this kind live on the client side.
"""
def put_block(segmentnum, data):
"""
@param segmentnum=int
@param data=ShareData: For most segments, this data will be 'blocksize'
bytes in length. The last segment might be shorter.
@return: a Deferred that fires (with None) when the operation completes
"""
def put_crypttext_hashes(hashes):
"""
@param hashes=ListOf(Hash)
@return: a Deferred that fires (with None) when the operation completes
"""
def put_block_hashes(blockhashes):
"""
@param blockhashes=ListOf(Hash)
@return: a Deferred that fires (with None) when the operation completes
"""
def put_share_hashes(sharehashes):
"""
@param sharehashes=ListOf(TupleOf(int, Hash))
@return: a Deferred that fires (with None) when the operation completes
"""
def put_uri_extension(data):
"""This block of data contains integrity-checking information (hashes
of plaintext, crypttext, and shares), as well as encoding parameters
that are necessary to recover the data. This is a serialized dict
mapping strings to other strings. The hash of this data is kept in
the URI and verified before any of the data is used. All buckets for
a given file contain identical copies of this data.
The serialization format is specified with the following pseudocode:
for k in sorted(dict.keys()):
assert re.match(r'^[a-zA-Z_\-]+$', k)
write(k + ':' + netstring(dict[k]))
@param data=URIExtensionData
@return: a Deferred that fires (with None) when the operation completes
"""
def close():
"""Finish writing and close the bucket. The share is not finalized
until this method is called: if the uploading client disconnects
before calling close(), the partially-written share will be
discarded.
@return: a Deferred that fires (with None) when the operation completes
"""
class IStorageBucketReader(Interface):
def get_block_data(blocknum, blocksize, size):
"""Most blocks will be the same size. The last block might be shorter
than the others.
@param blocknum=int
@param blocksize=int
@param size=int
@return: ShareData
"""
def get_crypttext_hashes():
"""
@return: ListOf(Hash)
"""
def get_block_hashes(at_least_these=()):
"""
@param at_least_these=SetOf(int)
@return: ListOf(Hash)
"""
def get_share_hashes():
"""
@return: ListOf(TupleOf(int, Hash))
"""
def get_uri_extension():
"""
@return: URIExtensionData
"""
class IStorageBroker(Interface):
def get_servers_for_psi(peer_selection_index):
"""
@return: list of IServer instances
"""
def get_connected_servers():
"""
@return: frozenset of connected IServer instances
"""
def get_known_servers():
"""
@return: frozenset of IServer instances
"""
def get_all_serverids():
"""
@return: frozenset of serverid strings
"""
def get_nickname_for_serverid(serverid):
"""
@return: unicode nickname, or None
"""
# methods moved from IntroducerClient, need review
def get_all_connections():
"""Return a frozenset of (nodeid, service_name, rref) tuples, one for
each active connection we've established to a remote service. This is
mostly useful for unit tests that need to wait until a certain number
of connections have been made."""
def get_all_connectors():
"""Return a dict that maps from (nodeid, service_name) to a
RemoteServiceConnector instance for all services that we are actively
trying to connect to. Each RemoteServiceConnector has the following
public attributes::
service_name: the type of service provided, like 'storage'
announcement_time: when we first heard about this service
last_connect_time: when we last established a connection
last_loss_time: when we last lost a connection
version: the peer's version, from the most recent connection
oldest_supported: the peer's oldest supported version, same
rref: the RemoteReference, if connected, otherwise None
remote_host: the IAddress, if connected, otherwise None
This method is intended for monitoring interfaces, such as a web page
that describes connecting and connected peers.
"""
def get_all_peerids():
"""Return a frozenset of all peerids to whom we have a connection (to
one or more services) established. Mostly useful for unit tests."""
def get_all_connections_for(service_name):
"""Return a frozenset of (nodeid, service_name, rref) tuples, one
for each active connection that provides the given SERVICE_NAME."""
def get_permuted_peers(service_name, key):
"""Returns an ordered list of (peerid, rref) tuples, selecting from
the connections that provide SERVICE_NAME, using a hash-based
permutation keyed by KEY. This randomizes the service list in a
repeatable way, to distribute load over many peers.
"""
class IDisplayableServer(Interface):
def get_nickname():
pass
def get_name():
pass
def get_longname():
pass
class IServer(IDisplayableServer):
"""I live in the client, and represent a single server."""
def start_connecting(tub, trigger_cb):
pass
def get_rref():
"""Once a server is connected, I return a RemoteReference.
Before a server is connected for the first time, I return None.
Note that the rref I return will start producing DeadReferenceErrors
once the connection is lost.
"""
class IMutableSlotWriter(Interface):
"""
The interface for a writer around a mutable slot on a remote server.
"""
def set_checkstring(seqnum_or_checkstring, root_hash=None, salt=None):
"""
Set the checkstring that I will pass to the remote server when
writing.
        @param seqnum_or_checkstring: a packed checkstring to use.
Note that implementations can differ in which semantics they
wish to support for set_checkstring -- they can, for example,
build the checkstring themselves from its constituents, or
some other thing.
"""
def get_checkstring():
"""
Get the checkstring that I think currently exists on the remote
server.
"""
def put_block(data, segnum, salt):
"""
Add a block and salt to the share.
"""
def put_encprivkey(encprivkey):
"""
Add the encrypted private key to the share.
"""
def put_blockhashes(blockhashes):
"""
@param blockhashes=list
Add the block hash tree to the share.
"""
def put_sharehashes(sharehashes):
"""
@param sharehashes=dict
Add the share hash chain to the share.
"""
def get_signable():
"""
Return the part of the share that needs to be signed.
"""
def put_signature(signature):
"""
Add the signature to the share.
"""
def put_verification_key(verification_key):
"""
Add the verification key to the share.
"""
def finish_publishing():
"""
Do anything necessary to finish writing the share to a remote
server. I require that no further publishing needs to take place
after this method has been called.
"""
class IURI(Interface):
def init_from_string(uri):
"""Accept a string (as created by my to_string() method) and populate
this instance with its data. I am not normally called directly,
please use the module-level uri.from_string() function to convert
arbitrary URI strings into IURI-providing instances."""
def is_readonly():
"""Return False if this URI be used to modify the data. Return True
if this URI cannot be used to modify the data."""
def is_mutable():
"""Return True if the data can be modified by *somebody* (perhaps
someone who has a more powerful URI than this one)."""
# TODO: rename to get_read_cap()
def get_readonly():
"""Return another IURI instance that represents a read-only form of
this one. If is_readonly() is True, this returns self."""
def get_verify_cap():
"""Return an instance that provides IVerifierURI, which can be used
to check on the availability of the file or directory, without
providing enough capabilities to actually read or modify the
contents. This may return None if the file does not need checking or
verification (e.g. LIT URIs).
"""
def to_string():
"""Return a string of printable ASCII characters, suitable for
passing into init_from_string."""
class IVerifierURI(Interface, IURI):
def init_from_string(uri):
"""Accept a string (as created by my to_string() method) and populate
this instance with its data. I am not normally called directly,
please use the module-level uri.from_string() function to convert
arbitrary URI strings into IURI-providing instances."""
def to_string():
"""Return a string of printable ASCII characters, suitable for
passing into init_from_string."""
class IDirnodeURI(Interface):
"""I am a URI that represents a dirnode."""
class IFileURI(Interface):
"""I am a URI that represents a filenode."""
def get_size():
"""Return the length (in bytes) of the file that I represent."""
class IImmutableFileURI(IFileURI):
pass
class IMutableFileURI(Interface):
pass
class IDirectoryURI(Interface):
pass
class IReadonlyDirectoryURI(Interface):
pass
class CapConstraintError(Exception):
"""A constraint on a cap was violated."""
class MustBeDeepImmutableError(CapConstraintError):
"""Mutable children cannot be added to an immutable directory.
Also, caps obtained from an immutable directory can trigger this error
if they are later found to refer to a mutable object and then used."""
class MustBeReadonlyError(CapConstraintError):
"""Known write caps cannot be specified in a ro_uri field. Also,
caps obtained from a ro_uri field can trigger this error if they
are later found to be write caps and then used."""
class MustNotBeUnknownRWError(CapConstraintError):
"""Cannot add an unknown child cap specified in a rw_uri field."""
class IReadable(Interface):
"""I represent a readable object -- either an immutable file, or a
specific version of a mutable file.
"""
def is_readonly():
"""Return True if this reference provides mutable access to the given
file or directory (i.e. if you can modify it), or False if not. Note
that even if this reference is read-only, someone else may hold a
read-write reference to it.
For an IReadable returned by get_best_readable_version(), this will
always return True, but for instances of subinterfaces such as
IMutableFileVersion, it may return False."""
def is_mutable():
"""Return True if this file or directory is mutable (by *somebody*,
        not necessarily you), False if it is immutable. Note that a file
might be mutable overall, but your reference to it might be
read-only. On the other hand, all references to an immutable file
will be read-only; there are no read-write references to an immutable
file."""
def get_storage_index():
"""Return the storage index of the file."""
def get_size():
"""Return the length (in bytes) of this readable object."""
def download_to_data():
"""Download all of the file contents. I return a Deferred that fires
with the contents as a byte string."""
def read(consumer, offset=0, size=None):
"""Download a portion (possibly all) of the file's contents, making
them available to the given IConsumer. Return a Deferred that fires
(with the consumer) when the consumer is unregistered (either because
the last byte has been given to it, or because the consumer threw an
exception during write(), possibly because it no longer wants to
receive data). The portion downloaded will start at 'offset' and
contain 'size' bytes (or the remainder of the file if size==None).
The consumer will be used in non-streaming mode: an IPullProducer
will be attached to it.
The consumer will not receive data right away: several network trips
must occur first. The order of events will be::
consumer.registerProducer(p, streaming)
(if streaming == False)::
consumer does p.resumeProducing()
consumer.write(data)
consumer does p.resumeProducing()
consumer.write(data).. (repeat until all data is written)
consumer.unregisterProducer()
deferred.callback(consumer)
If a download error occurs, or an exception is raised by
consumer.registerProducer() or consumer.write(), I will call
consumer.unregisterProducer() and then deliver the exception via
deferred.errback(). To cancel the download, the consumer should call
p.stopProducing(), which will result in an exception being delivered
via deferred.errback().
See src/allmydata/util/consumer.py for an example of a simple
download-to-memory consumer.
"""
class IWriteable(Interface):
"""
I define methods that callers can use to update SDMF and MDMF
mutable files on a Tahoe-LAFS grid.
"""
# XXX: For the moment, we have only this. It is possible that we
# want to move overwrite() and modify() in here too.
def update(data, offset):
"""
I write the data from my data argument to the MDMF file,
starting at offset. I continue writing data until my data
argument is exhausted, appending data to the file as necessary.
"""
# assert IMutableUploadable.providedBy(data)
# to append data: offset=node.get_size_of_best_version()
# do we want to support compacting MDMF?
# for an MDMF file, this can be done with O(data.get_size())
# memory. For an SDMF file, any modification takes
# O(node.get_size_of_best_version()).
class IMutableFileVersion(IReadable):
"""I provide access to a particular version of a mutable file. The
access is read/write if I was obtained from a filenode derived from
a write cap, or read-only if the filenode was derived from a read cap.
"""
def get_sequence_number():
"""Return the sequence number of this version."""
def get_servermap():
"""Return the IMutableFileServerMap instance that was used to create
this object.
"""
def get_writekey():
"""Return this filenode's writekey, or None if the node does not have
write-capability. This may be used to assist with data structures
that need to make certain data available only to writers, such as the
read-write child caps in dirnodes. The recommended process is to have
reader-visible data be submitted to the filenode in the clear (where
it will be encrypted by the filenode using the readkey), but encrypt
writer-visible data using this writekey.
"""
def overwrite(new_contents):
"""Replace the contents of the mutable file, provided that no other
node has published (or is attempting to publish, concurrently) a
newer version of the file than this one.
I will avoid modifying any share that is different than the version
given by get_sequence_number(). However, if another node is writing
to the file at the same time as me, I may manage to update some shares
while they update others. If I see any evidence of this, I will signal
UncoordinatedWriteError, and the file will be left in an inconsistent
state (possibly the version you provided, possibly the old version,
possibly somebody else's version, and possibly a mix of shares from
all of these).
The recommended response to UncoordinatedWriteError is to either
return it to the caller (since they failed to coordinate their
writes), or to attempt some sort of recovery. It may be sufficient to
wait a random interval (with exponential backoff) and repeat your
operation. If I do not signal UncoordinatedWriteError, then I was
able to write the new version without incident.
I return a Deferred that fires (with a PublishStatus object) when the
update has completed.
"""
def modify(modifier_cb):
"""Modify the contents of the file, by downloading this version,
applying the modifier function (or bound method), then uploading
the new version. This will succeed as long as no other node
publishes a version between the download and the upload.
I return a Deferred that fires (with a PublishStatus object) when
the update is complete.
The modifier callable will be given three arguments: a string (with
the old contents), a 'first_time' boolean, and a servermap. As with
download_to_data(), the old contents will be from this version,
but the modifier can use the servermap to make other decisions
(such as refusing to apply the delta if there are multiple parallel
versions, or if there is evidence of a newer unrecoverable version).
'first_time' will be True the first time the modifier is called,
and False on any subsequent calls.
The callable should return a string with the new contents. The
callable must be prepared to be called multiple times, and must
examine the input string to see if the change that it wants to make
is already present in the old version. If it does not need to make
any changes, it can either return None, or return its input string.
If the modifier raises an exception, it will be returned in the
errback.
"""
# The hierarchy looks like this:
# IFilesystemNode
# IFileNode
# IMutableFileNode
# IImmutableFileNode
# IDirectoryNode
class IFilesystemNode(Interface):
def get_cap():
"""Return the strongest 'cap instance' associated with this node.
(writecap for writeable-mutable files/directories, readcap for
immutable or readonly-mutable files/directories). To convert this
into a string, call .to_string() on the result."""
def get_readcap():
"""Return a readonly cap instance for this node. For immutable or
readonly nodes, get_cap() and get_readcap() return the same thing."""
def get_repair_cap():
"""Return an IURI instance that can be used to repair the file, or
None if this node cannot be repaired (either because it is not
distributed, like a LIT file, or because the node does not represent
sufficient authority to create a repair-cap, like a read-only RSA
mutable file node [which cannot create the correct write-enablers]).
"""
def get_verify_cap():
"""Return an IVerifierURI instance that represents the
        'verify/refresh capability' for this node. The holder of this
capability will be able to renew the lease for this node, protecting
it from garbage-collection. They will also be able to ask a server if
it holds a share for the file or directory.
"""
def get_uri():
"""Return the URI string corresponding to the strongest cap associated
with this node. If this node is read-only, the URI will only offer
read-only access. If this node is read-write, the URI will offer
read-write access.
If you have read-write access to a node and wish to share merely
read-only access with others, use get_readonly_uri().
"""
def get_write_uri():
"""Return the URI string that can be used by others to get write
access to this node, if it is writeable. If this is a read-only node,
return None."""
def get_readonly_uri():
"""Return the URI string that can be used by others to get read-only
access to this node. The result is a read-only URI, regardless of
whether this node is read-only or read-write.
If you have merely read-only access to this node, get_readonly_uri()
will return the same thing as get_uri().
"""
def get_storage_index():
"""Return a string with the (binary) storage index in use on this
download. This may be None if there is no storage index (i.e. LIT
files and directories)."""
def is_readonly():
"""Return True if this reference provides mutable access to the given
file or directory (i.e. if you can modify it), or False if not. Note
that even if this reference is read-only, someone else may hold a
read-write reference to it."""
def is_mutable():
"""Return True if this file or directory is mutable (by *somebody*,
        not necessarily you), False if it is immutable. Note that a file
might be mutable overall, but your reference to it might be
read-only. On the other hand, all references to an immutable file
will be read-only; there are no read-write references to an immutable
file.
"""
def is_unknown():
"""Return True if this is an unknown node."""
def is_allowed_in_immutable_directory():
"""Return True if this node is allowed as a child of a deep-immutable
directory. This is true if either the node is of a known-immutable type,
or it is unknown and read-only.
"""
def raise_error():
"""Raise any error associated with this node."""
# XXX: These may not be appropriate outside the context of an IReadable.
def get_size():
"""Return the length (in bytes) of the data this node represents. For
directory nodes, I return the size of the backing store. I return
synchronously and do not consult the network, so for mutable objects,
I will return the most recently observed size for the object, or None
if I don't remember a size. Use get_current_size, which returns a
Deferred, if you want more up-to-date information."""
def get_current_size():
"""I return a Deferred that fires with the length (in bytes) of the
data this node represents.
"""
class IFileNode(IFilesystemNode):
"""I am a node that represents a file: a sequence of bytes. I am not a
container, like IDirectoryNode."""
def get_best_readable_version():
"""Return a Deferred that fires with an IReadable for the 'best'
available version of the file. The IReadable provides only read
access, even if this filenode was derived from a write cap.
For an immutable file, there is only one version. For a mutable
file, the 'best' version is the recoverable version with the
highest sequence number. If no uncoordinated writes have occurred,
and if enough shares are available, then this will be the most
recent version that has been uploaded. If no version is recoverable,
the Deferred will errback with an UnrecoverableFileError.
"""
def download_best_version():
"""Download the contents of the version that would be returned
by get_best_readable_version(). This is equivalent to calling
download_to_data() on the IReadable given by that method.
I return a Deferred that fires with a byte string when the file
has been fully downloaded. To support streaming download, use
the 'read' method of IReadable. If no version is recoverable,
the Deferred will errback with an UnrecoverableFileError.
"""
def get_size_of_best_version():
"""Find the size of the version that would be returned by
get_best_readable_version().
I return a Deferred that fires with an integer. If no version
is recoverable, the Deferred will errback with an
UnrecoverableFileError.
"""
class IImmutableFileNode(IFileNode, IReadable):
"""I am a node representing an immutable file. Immutable files have
only one version"""
class IMutableFileNode(IFileNode):
"""I provide access to a 'mutable file', which retains its identity
regardless of what contents are put in it.
The consistency-vs-availability problem means that there might be
multiple versions of a file present in the grid, some of which might be
unrecoverable (i.e. have fewer than 'k' shares). These versions are
loosely ordered: each has a sequence number and a hash, and any version
with seqnum=N was uploaded by a node that has seen at least one version
with seqnum=N-1.
The 'servermap' (an instance of IMutableFileServerMap) is used to
describe the versions that are known to be present in the grid, and which
servers are hosting their shares. It is used to represent the 'state of
the world', and is used for this purpose by my test-and-set operations.
Downloading the contents of the mutable file will also return a
servermap. Uploading a new version into the mutable file requires a
servermap as input, and the semantics of the replace operation is
'replace the file with my new version if it looks like nobody else has
changed the file since my previous download'. Because the file is
distributed, this is not a perfect test-and-set operation, but it will do
its best. If the replace process sees evidence of a simultaneous write,
it will signal an UncoordinatedWriteError, so that the caller can take
corrective action.
Most readers will want to use the 'best' current version of the file, and
should use my 'download_best_version()' method.
To unconditionally replace the file, callers should use overwrite(). This
is the mode that user-visible mutable files will probably use.
To apply some delta to the file, call modify() with a callable modifier
function that can apply the modification that you want to make. This is
the mode that dirnodes will use, since most directory modification
operations can be expressed in terms of deltas to the directory state.
Three methods are available for users who need to perform more complex
operations. The first is get_servermap(), which returns an up-to-date
servermap using a specified mode. The second is download_version(), which
downloads a specific version (not necessarily the 'best' one). The third
is 'upload', which accepts new contents and a servermap (which must have
been updated with MODE_WRITE). The upload method will attempt to apply
the new contents as long as no other node has modified the file since the
servermap was updated. This might be useful to a caller who wants to
merge multiple versions into a single new one.
Note that each time the servermap is updated, a specific 'mode' is used,
which determines how many peers are queried. To use a servermap for my
replace() method, that servermap must have been updated in MODE_WRITE.
These modes are defined in allmydata.mutable.common, and consist of
MODE_READ, MODE_WRITE, MODE_ANYTHING, and MODE_CHECK. Please look in
allmydata/mutable/servermap.py for details about the differences.
Mutable files are currently limited in size (about 3.5MB max) and can
only be retrieved and updated all-at-once, as a single big string. Future
versions of our mutable files will remove this restriction.
"""
def get_best_mutable_version():
"""Return a Deferred that fires with an IMutableFileVersion for
the 'best' available version of the file. The best version is
the recoverable version with the highest sequence number. If no
uncoordinated writes have occurred, and if enough shares are
available, then this will be the most recent version that has
been uploaded.
If no version is recoverable, the Deferred will errback with an
UnrecoverableFileError.
"""
def overwrite(new_contents):
"""Unconditionally replace the contents of the mutable file with new
ones. This simply chains get_servermap(MODE_WRITE) and upload(). This
is only appropriate to use when the new contents of the file are
completely unrelated to the old ones, and you do not care about other
clients' changes.
I return a Deferred that fires (with a PublishStatus object) when the
update has completed.
"""
def modify(modifier_cb):
"""Modify the contents of the file, by downloading the current
version, applying the modifier function (or bound method), then
uploading the new version. I return a Deferred that fires (with a
PublishStatus object) when the update is complete.
The modifier callable will be given three arguments: a string (with
the old contents), a 'first_time' boolean, and a servermap. As with
download_best_version(), the old contents will be from the best
recoverable version, but the modifier can use the servermap to make
other decisions (such as refusing to apply the delta if there are
multiple parallel versions, or if there is evidence of a newer
unrecoverable version). 'first_time' will be True the first time the
modifier is called, and False on any subsequent calls.
The callable should return a string with the new contents. The
callable must be prepared to be called multiple times, and must
examine the input string to see if the change that it wants to make
is already present in the old version. If it does not need to make
any changes, it can either return None, or return its input string.
If the modifier raises an exception, it will be returned in the
errback.
"""
def get_servermap(mode):
"""Return a Deferred that fires with an IMutableFileServerMap
instance, updated using the given mode.
"""
def download_version(servermap, version):
"""Download a specific version of the file, using the servermap
as a guide to where the shares are located.
I return a Deferred that fires with the requested contents, or
errbacks with UnrecoverableFileError. Note that a servermap that was
updated with MODE_ANYTHING or MODE_READ may not know about shares for
all versions (those modes stop querying servers as soon as they can
fulfil their goals), so you may want to use MODE_CHECK (which checks
everything) to get increased visibility.
"""
def upload(new_contents, servermap):
"""Replace the contents of the file with new ones. This requires a
servermap that was previously updated with MODE_WRITE.
I attempt to provide test-and-set semantics, in that I will avoid
modifying any share that is different than the version I saw in the
servermap. However, if another node is writing to the file at the
same time as me, I may manage to update some shares while they update
others. If I see any evidence of this, I will signal
UncoordinatedWriteError, and the file will be left in an inconsistent
state (possibly the version you provided, possibly the old version,
possibly somebody else's version, and possibly a mix of shares from
all of these).
The recommended response to UncoordinatedWriteError is to either
return it to the caller (since they failed to coordinate their
writes), or to attempt some sort of recovery. It may be sufficient to
wait a random interval (with exponential backoff) and repeat your
operation. If I do not signal UncoordinatedWriteError, then I was
able to write the new version without incident.
I return a Deferred that fires (with a PublishStatus object) when the
publish has completed. I will update the servermap in-place with the
location of all new shares.
"""
def get_writekey():
"""Return this filenode's writekey, or None if the node does not have
write-capability. This may be used to assist with data structures
that need to make certain data available only to writers, such as the
read-write child caps in dirnodes. The recommended process is to have
reader-visible data be submitted to the filenode in the clear (where
it will be encrypted by the filenode using the readkey), but encrypt
writer-visible data using this writekey.
"""
def get_version():
"""Returns the mutable file protocol version."""
class NotEnoughSharesError(Exception):
"""Download was unable to get enough shares"""
class NoSharesError(Exception):
"""Download was unable to get any shares at all."""
class DownloadStopped(Exception):
pass
class UploadUnhappinessError(Exception):
"""Upload was unable to satisfy 'servers_of_happiness'"""
class UnableToFetchCriticalDownloadDataError(Exception):
"""I was unable to fetch some piece of critical data that is supposed to
be identically present in all shares."""
class NoServersError(Exception):
"""Upload wasn't given any servers to work with, usually indicating a
network or Introducer problem."""
class ExistingChildError(Exception):
"""A directory node was asked to add or replace a child that already
exists, and overwrite= was set to False."""
class NoSuchChildError(Exception):
"""A directory node was asked to fetch a child that does not exist."""
def __str__(self):
# avoid UnicodeEncodeErrors when converting to str
return self.__repr__()
class ChildOfWrongTypeError(Exception):
"""An operation was attempted on a child of the wrong type (file or directory)."""
class IDirectoryNode(IFilesystemNode):
"""I represent a filesystem node that is a container, with a
name-to-child mapping, holding the tahoe equivalent of a directory. All
child names are unicode strings, and all children are some sort of
IFilesystemNode (a file, subdirectory, or unknown node).
"""
def get_uri():
"""
The dirnode ('1') URI returned by this method can be used in
set_uri() on a different directory ('2') to 'mount' a reference to
this directory ('1') under the other ('2'). This URI is just a
string, so it can be passed around through email or other out-of-band
protocol.
"""
def get_readonly_uri():
"""
The dirnode ('1') URI returned by this method can be used in
set_uri() on a different directory ('2') to 'mount' a reference to
this directory ('1') under the other ('2'). This URI is just a
string, so it can be passed around through email or other out-of-band
protocol.
"""
def list():
"""I return a Deferred that fires with a dictionary mapping child
name (a unicode string) to (node, metadata_dict) tuples, in which
'node' is an IFilesystemNode and 'metadata_dict' is a dictionary of
metadata."""
def has_child(name):
"""I return a Deferred that fires with a boolean, True if there
exists a child of the given name, False if not. The child name must
be a unicode string."""
def get(name):
"""I return a Deferred that fires with a specific named child node,
which is an IFilesystemNode. The child name must be a unicode string.
I raise NoSuchChildError if I do not have a child by that name."""
def get_metadata_for(name):
"""I return a Deferred that fires with the metadata dictionary for
a specific named child node. The child name must be a unicode string.
This metadata is stored in the *edge*, not in the child, so it is
attached to the parent dirnode rather than the child node.
I raise NoSuchChildError if I do not have a child by that name."""
def set_metadata_for(name, metadata):
"""I replace any existing metadata for the named child with the new
metadata. The child name must be a unicode string. This metadata is
stored in the *edge*, not in the child, so it is attached to the
parent dirnode rather than the child node. I return a Deferred
(that fires with this dirnode) when the operation is complete.
I raise NoSuchChildError if I do not have a child by that name."""
def get_child_at_path(path):
"""Transform a child path into an IFilesystemNode.
I perform a recursive series of 'get' operations to find the named
descendant node. I return a Deferred that fires with the node, or
errbacks with NoSuchChildError if the node could not be found.
The path can be either a single string (slash-separated) or a list of
path-name elements. All elements must be unicode strings.
"""
def get_child_and_metadata_at_path(path):
"""Transform a child path into an IFilesystemNode and metadata.
I am like get_child_at_path(), but my Deferred fires with a tuple of
(node, metadata). The metadata comes from the last edge. If the path
is empty, the metadata will be an empty dictionary.
"""
def set_uri(name, writecap, readcap=None, metadata=None, overwrite=True):
"""I add a child (by writecap+readcap) at the specific name. I return
a Deferred that fires when the operation finishes. If overwrite= is
True, I will replace any existing child of the same name, otherwise
an existing child will cause me to return ExistingChildError. The
child name must be a unicode string.
The child caps could be for a file, or for a directory. If you have
both the writecap and readcap, you should provide both arguments.
If you have only one cap and don't know whether it is read-only,
provide it as the writecap argument and leave the readcap as None.
If you have only one cap that is known to be read-only, provide it
as the readcap argument and leave the writecap as None.
The filecaps are typically obtained from an IFilesystemNode with
get_uri() and get_readonly_uri().
If metadata= is provided, I will use it as the metadata for the named
edge. This will replace any existing metadata. If metadata= is left
as the default value of None, I will set ['mtime'] to the current
time, and I will set ['ctime'] to the current time if there was not
already a child by this name present. This roughly matches the
ctime/mtime semantics of traditional filesystems. See the
"About the metadata" section of webapi.txt for futher information.
If this directory node is read-only, the Deferred will errback with a
NotWriteableError."""
def set_children(entries, overwrite=True):
"""Add multiple children (by writecap+readcap) to a directory node.
Takes a dictionary, with childname as keys and (writecap, readcap)
tuples (or (writecap, readcap, metadata) triples) as values. Returns
a Deferred that fires (with this dirnode) when the operation
finishes. This is equivalent to calling set_uri() multiple times, but
is much more efficient. All child names must be unicode strings.
"""
def set_node(name, child, metadata=None, overwrite=True):
"""I add a child at the specific name. I return a Deferred that fires
when the operation finishes. This Deferred will fire with the child
node that was just added. I will replace any existing child of the
same name. The child name must be a unicode string. The 'child'
instance must be an instance providing IFilesystemNode.
If metadata= is provided, I will use it as the metadata for the named
edge. This will replace any existing metadata. If metadata= is left
as the default value of None, I will set ['mtime'] to the current
time, and I will set ['ctime'] to the current time if there was not
already a child by this name present. This roughly matches the
ctime/mtime semantics of traditional filesystems. See the
"About the metadata" section of webapi.txt for futher information.
If this directory node is read-only, the Deferred will errback with a
NotWriteableError."""
def set_nodes(entries, overwrite=True):
"""Add multiple children to a directory node. Takes a dict mapping
        unicode childname to (child_node, metadata) tuples. If metadata=None,
the original metadata is left unmodified. Returns a Deferred that
fires (with this dirnode) when the operation finishes. This is
equivalent to calling set_node() multiple times, but is much more
efficient."""
def add_file(name, uploadable, metadata=None, overwrite=True):
"""I upload a file (using the given IUploadable), then attach the
resulting ImmutableFileNode to the directory at the given name. I set
metadata the same way as set_uri and set_node. The child name must be
a unicode string.
I return a Deferred that fires (with the IFileNode of the uploaded
file) when the operation completes."""
def delete(name, must_exist=True, must_be_directory=False, must_be_file=False):
"""I remove the child at the specific name. I return a Deferred that
fires when the operation finishes. The child name must be a unicode
string. If must_exist is True and I do not have a child by that name,
I raise NoSuchChildError. If must_be_directory is True and the child
is a file, or if must_be_file is True and the child is a directory,
I raise ChildOfWrongTypeError."""
def create_subdirectory(name, initial_children={}, overwrite=True, metadata=None):
"""I create and attach a directory at the given name. The new
directory can be empty, or it can be populated with children
according to 'initial_children', which takes a dictionary in the same
format as set_nodes (i.e. mapping unicode child name to (childnode,
metadata) tuples). The child name must be a unicode string. I return
a Deferred that fires (with the new directory node) when the
operation finishes."""
def move_child_to(current_child_name, new_parent, new_child_name=None,
overwrite=True):
"""I take one of my children and move them to a new parent. The child
is referenced by name. On the new parent, the child will live under
'new_child_name', which defaults to 'current_child_name'. TODO: what
should we do about metadata? I return a Deferred that fires when the
operation finishes. The child name must be a unicode string. I raise
NoSuchChildError if I do not have a child by that name."""
def build_manifest():
"""I generate a table of everything reachable from this directory.
I also compute deep-stats as described below.
I return a Monitor. The Monitor's results will be a dictionary with
four elements:
res['manifest']: a list of (path, cap) tuples for all nodes
(directories and files) reachable from this one.
'path' will be a tuple of unicode strings. The
origin dirnode will be represented by an empty path
tuple.
res['verifycaps']: a list of (printable) verifycap strings, one for
each reachable non-LIT node. This is a set:
it will contain no duplicates.
res['storage-index']: a list of (base32) storage index strings,
one for each reachable non-LIT node. This is
a set: it will contain no duplicates.
res['stats']: a dictionary, the same that is generated by
start_deep_stats() below.
The Monitor will also have an .origin_si attribute with the (binary)
storage index of the starting point.
"""
def start_deep_stats():
"""Return a Monitor, examining all nodes (directories and files)
reachable from this one. The Monitor's results will be a dictionary
with the following keys::
count-immutable-files: count of how many CHK files are in the set
count-mutable-files: same, for mutable files (does not include
directories)
count-literal-files: same, for LIT files
count-files: sum of the above three
count-directories: count of directories
size-immutable-files: total bytes for all CHK files in the set
size-mutable-files (TODO): same, for current version of all mutable
files, does not include directories
size-literal-files: same, for LIT files
size-directories: size of mutable files used by directories
largest-directory: number of bytes in the largest directory
largest-directory-children: number of children in the largest
directory
largest-immutable-file: number of bytes in the largest CHK file
size-mutable-files is not yet implemented, because it would involve
even more queries than deep_stats does.
The Monitor will also have an .origin_si attribute with the (binary)
storage index of the starting point.
This operation will visit every directory node underneath this one,
and can take a long time to run. On a typical workstation with good
bandwidth, this can examine roughly 15 directories per second (and
takes several minutes of 100% CPU for ~1700 directories).
"""
class ICodecEncoder(Interface):
def set_params(data_size, required_shares, max_shares):
"""Set up the parameters of this encoder.
This prepares the encoder to perform an operation that converts a
single block of data into a number of shares, such that a future
ICodecDecoder can use a subset of these shares to recover the
original data. This operation is invoked by calling encode(). Once
the encoding parameters are set up, the encode operation can be
invoked multiple times.
set_params() prepares the encoder to accept blocks of input data that
are exactly 'data_size' bytes in length. The encoder will be prepared
to produce 'max_shares' shares for each encode() operation (although
see the 'desired_share_ids' to use less CPU). The encoding math will
be chosen such that the decoder can get by with as few as
'required_shares' of these shares and still reproduce the original
data. For example, set_params(1000, 5, 5) offers no redundancy at
all, whereas set_params(1000, 1, 10) provides 10x redundancy.
Numerical Restrictions: 'data_size' is required to be an integral
multiple of 'required_shares'. In general, the caller should choose
required_shares and max_shares based upon their reliability
requirements and the number of peers available (the total storage
space used is roughly equal to max_shares*data_size/required_shares),
then choose data_size to achieve the memory footprint desired (larger
data_size means more efficient operation, smaller data_size means
smaller memory footprint).
In addition, 'max_shares' must be equal to or greater than
'required_shares'. Of course, setting them to be equal causes
encode() to degenerate into a particularly slow form of the 'split'
utility.
See encode() for more details about how these parameters are used.
set_params() must be called before any other ICodecEncoder methods
may be invoked.
"""
def get_params():
"""Return the 3-tuple of data_size, required_shares, max_shares"""
def get_encoder_type():
"""Return a short string that describes the type of this encoder.
There is required to be a global table of encoder classes. This method
returns an index into this table; the value at this index is an
encoder class, and this encoder is an instance of that class.
"""
def get_block_size():
"""Return the length of the shares that encode() will produce.
"""
def encode_proposal(data, desired_share_ids=None):
"""Encode some data.
'data' must be a string (or other buffer object), and len(data) must
be equal to the 'data_size' value passed earlier to set_params().
This will return a Deferred that will fire with two lists. The first
is a list of shares, each of which is a string (or other buffer
object) such that len(share) is the same as what get_share_size()
returned earlier. The second is a list of shareids, in which each is
an integer. The lengths of the two lists will always be equal to each
other. The user should take care to keep each share closely
associated with its shareid, as one is useless without the other.
The length of this output list will normally be the same as the value
provided to the 'max_shares' parameter of set_params(). This may be
different if 'desired_share_ids' is provided.
'desired_share_ids', if provided, is required to be a sequence of
ints, each of which is required to be >= 0 and < max_shares. If not
provided, encode() will produce 'max_shares' shares, as if
'desired_share_ids' were set to range(max_shares). You might use this
if you initially thought you were going to use 10 peers, started
encoding, and then two of the peers dropped out: you could use
desired_share_ids= to skip the work (both memory and CPU) of
producing shares for the peers that are no longer available.
"""
def encode(inshares, desired_share_ids=None):
"""Encode some data. This may be called multiple times. Each call is
independent.
inshares is a sequence of length required_shares, containing buffers
(i.e. strings), where each buffer contains the next contiguous
non-overlapping segment of the input data. Each buffer is required to
be the same length, and the sum of the lengths of the buffers is
required to be exactly the data_size promised by set_params(). (This
implies that the data has to be padded before being passed to
encode(), unless of course it already happens to be an even multiple
of required_shares in length.)
Note: the requirement to break up your data into
'required_shares' chunks of exactly the right length before
            calling encode() is surprising from the point of view of a user
who doesn't know how FEC works. It feels like an
implementation detail that has leaked outside the abstraction
barrier. Is there a use case in which the data to be encoded
might already be available in pre-segmented chunks, such that
it is faster or less work to make encode() take a list rather
than splitting a single string?
Yes, there is: suppose you are uploading a file with K=64,
N=128, segsize=262,144. Then each in-share will be of size
4096. If you use this .encode() API then your code could first
read each successive 4096-byte chunk from the file and store
each one in a Python string and store each such Python string
in a Python list. Then you could call .encode(), passing that
list as "inshares". The encoder would generate the other 64
"secondary shares" and return to you a new list containing
references to the same 64 Python strings that you passed in
(as the primary shares) plus references to the new 64 Python
strings.
(You could even imagine that your code could use readv() so
that the operating system can arrange to get all of those
bytes copied from the file into the Python list of Python
strings as efficiently as possible instead of having a loop
written in C or in Python to copy the next part of the file
into the next string.)
On the other hand if you instead use the .encode_proposal()
API (above), then your code can first read in all of the
262,144 bytes of the segment from the file into a Python
string, then call .encode_proposal() passing the segment data
as the "data" argument. The encoder would basically first
split the "data" argument into a list of 64 in-shares of 4096
byte each, and then do the same thing that .encode() does. So
this would result in a little bit more copying of data and a
little bit higher of a "maximum memory usage" during the
process, although it might or might not make a practical
difference for our current use cases.
Note that "inshares" is a strange name for the parameter if
you think of the parameter as being just for feeding in data
to the codec. It makes more sense if you think of the result
of this encoding as being the set of shares from inshares plus
an extra set of "secondary shares" (or "check shares"). It is
a surprising name! If the API is going to be surprising then
the name should be surprising. If we switch to
encode_proposal() above then we should also switch to an
unsurprising name.
'desired_share_ids', if provided, is required to be a sequence of
ints, each of which is required to be >= 0 and < max_shares. If not
provided, encode() will produce 'max_shares' shares, as if
'desired_share_ids' were set to range(max_shares). You might use this
if you initially thought you were going to use 10 peers, started
encoding, and then two of the peers dropped out: you could use
desired_share_ids= to skip the work (both memory and CPU) of
producing shares for the peers that are no longer available.
For each call, encode() will return a Deferred that fires with two
lists, one containing shares and the other containing the shareids.
The get_share_size() method can be used to determine the length of
the share strings returned by encode(). Each shareid is a small
integer, exactly as passed into 'desired_share_ids' (or
range(max_shares), if desired_share_ids was not provided).
The shares and their corresponding shareids are required to be kept
together during storage and retrieval. Specifically, the share data is
useless by itself: the decoder needs to be told which share is which
by providing it with both the shareid and the actual share data.
This function will allocate an amount of memory roughly equal to::
(max_shares - required_shares) * get_share_size()
When combined with the memory that the caller must allocate to
provide the input data, this leads to a memory footprint roughly
equal to the size of the resulting encoded shares (i.e. the expansion
factor times the size of the input segment).
"""
# rejected ideas:
#
# returning a list of (shareidN,shareN) tuples instead of a pair of
# lists (shareids..,shares..). Brian thought the tuples would
# encourage users to keep the share and shareid together throughout
# later processing, Zooko pointed out that the code to iterate
# through two lists is not really more complicated than using a list
# of tuples and there's also a performance improvement
#
# having 'data_size' not required to be an integral multiple of
# 'required_shares'. Doing this would require encode() to perform
# padding internally, and we'd prefer to have any padding be done
# explicitly by the caller. Yes, it is an abstraction leak, but
# hopefully not an onerous one.
class ICodecDecoder(Interface):
def set_params(data_size, required_shares, max_shares):
"""Set the params. They have to be exactly the same ones that were
used for encoding."""
def get_needed_shares():
"""Return the number of shares needed to reconstruct the data.
set_params() is required to be called before this."""
def decode(some_shares, their_shareids):
"""Decode a partial list of shares into data.
'some_shares' is required to be a sequence of buffers of sharedata, a
subset of the shares returned by ICodecEncode.encode(). Each share is
required to be of the same length. The i'th element of their_shareids
is required to be the shareid of the i'th buffer in some_shares.
This returns a Deferred that fires with a sequence of buffers. This
sequence will contain all of the segments of the original data, in
order. The sum of the lengths of all of the buffers will be the
'data_size' value passed into the original ICodecEncode.set_params()
call. To get back the single original input block of data, use
''.join(output_buffers), or you may wish to simply write them in
order to an output file.
Note that some of the elements in the result sequence may be
references to the elements of the some_shares input sequence. In
particular, this means that if those share objects are mutable (e.g.
arrays) and if they are changed, then both the input (the
'some_shares' parameter) and the output (the value given when the
deferred is triggered) will change.
The length of 'some_shares' is required to be exactly the value of
'required_shares' passed into the original ICodecEncode.set_params()
call.
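A minimal, hypothetical round-trip sketch (assuming 'encoder' and
'decoder' are concrete objects providing ICodecEncoder and
ICodecDecoder, e.g. the zfec-based implementations)::

    encoder.set_params(4096, 2, 4)
    decoder.set_params(4096, 2, 4)
    d = encoder.encode(['A'*2048, 'B'*2048])
    def _decode(res):
        (shares, shareids) = res
        # any 2 of the 4 shares suffice; here we use the two
        # secondary ("check") shares
        return decoder.decode(shares[2:4], shareids[2:4])
    d.addCallback(_decode)
    d.addCallback(lambda buffers: ''.join(buffers))  # the original 4096 bytes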
"""
class IEncoder(Interface):
"""I take an object that provides IEncryptedUploadable, which provides
encrypted data, and a list of shareholders. I then encode, hash, and
deliver shares to those shareholders. I will compute all the Merkle
hash trees that are necessary to validate the crypttext that
eventually comes back from the shareholders. I provide the URI Extension
Block Hash, and the encoding parameters, both of which must be included
in the URI.
I do not choose shareholders, that is left to the IUploader. I must be
given a dict of RemoteReferences to storage buckets that are ready and
willing to receive data.
"""
def set_size(size):
"""Specify the number of bytes that will be encoded. This must be
performed before get_serialized_params() can be called.
"""
def set_encrypted_uploadable(u):
"""Provide a source of encrypted upload data. 'u' must implement
IEncryptedUploadable.
When this is called, the IEncryptedUploadable will be queried for its
length and the storage_index that should be used.
This returns a Deferred that fires with this Encoder instance.
This must be performed before start() can be called.
"""
def get_param(name):
"""Return an encoding parameter, by name.
'storage_index': return a string with the (16-byte truncated SHA-256
hash) storage index to which these shares should be
pushed.
'share_counts': return a tuple describing how many shares are used:
(needed_shares, servers_of_happiness, total_shares)
'num_segments': return an int with the number of segments that
will be encoded.
'segment_size': return an int with the size of each segment.
'block_size': return the size of the individual blocks that will
be delivered to a shareholder's put_block() method. By
knowing this, the shareholder will be able to keep all
blocks in a single file and still provide random access
when reading them. # TODO: can we avoid exposing this?
'share_size': an int with the size of the data that will be stored
on each shareholder. This is the aggregate amount of data
that will be sent to the shareholder, summed over all
the put_block() calls I will ever make. It is useful to
determine this size before asking potential
shareholders whether they will grant a lease or not,
since their answers will depend upon how much space we
need. TODO: this might also include some amount of
overhead, like the size of all the hashes. We need to
decide whether this is useful or not.
'serialized_params': a string with a concise description of the
codec name and its parameters. This may be passed
into the IUploadable to let it make sure that
the same file encoded with different parameters
will result in different storage indexes.
Once this is called, set_size() and set_params() may not be called.
"""
def set_shareholders(shareholders, servermap):
"""Tell the encoder where to put the encoded shares. 'shareholders'
must be a dictionary that maps share number (an integer ranging from
0 to n-1) to an instance that provides IStorageBucketWriter.
'servermap' is a dictionary that maps share number (as defined above)
to a set of peerids. This must be performed before start() can be
called."""
def start():
"""Begin the encode/upload process. This involves reading encrypted
data from the IEncryptedUploadable, encoding it, uploading the shares
to the shareholders, then sending the hash trees.
set_encrypted_uploadable() and set_shareholders() must be called
before this can be invoked.
This returns a Deferred that fires with a verify cap when the upload
process is complete. The verifycap, plus the encryption key, is
sufficient to construct the read cap.
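A hypothetical ordering sketch (names are assumed; error handling is
elided)::

    d = encoder.set_encrypted_uploadable(encrypted_uploadable)
    def _ready(enc):
        enc.set_shareholders(bucket_writers, servermap)
        return enc.start()
    d.addCallback(_ready)  # eventually fires with the verify cap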
"""
class IDecoder(Interface):
"""I take a list of shareholders and some setup information, then
download, validate, decode, and decrypt data from them, writing the
results to an output file.
I do not locate the shareholders, that is left to the IDownloader. I must
be given a dict of RemoteReferences to storage buckets that are ready to
send data.
"""
def setup(outfile):
"""I take a file-like object (providing write and close) to which all
the plaintext data will be written.
TODO: producer/consumer . Maybe write() should return a Deferred that
indicates when it will accept more data? But probably having the
IDecoder be a producer is easier to glue to IConsumer pieces.
"""
def set_shareholders(shareholders):
"""I take a dictionary that maps share identifiers (small integers)
to RemoteReferences that provide RIBucketReader. This must be called
before start()."""
def start():
"""I start the download. This process involves retrieving data and
hash chains from the shareholders, using the hashes to validate the
data, decoding the shares into segments, decrypting the segments,
then writing the resulting plaintext to the output file.
I return a Deferred that will fire (with self) when the download is
complete.
"""
class IDownloadTarget(Interface):
# Note that if the IDownloadTarget is also an IConsumer, the downloader
# will register itself as a producer. This allows the target to invoke
# downloader.pauseProducing, resumeProducing, and stopProducing.
def open(size):
"""Called before any calls to write() or close(). If an error
occurs before any data is available, fail() may be called without
a previous call to open().
'size' is the length of the file being downloaded, in bytes."""
def write(data):
"""Output some data to the target."""
def close():
"""Inform the target that there is no more data to be written."""
def fail(why):
"""fail() is called to indicate that the download has failed. 'why'
is a Failure object indicating what went wrong. No further methods
will be invoked on the IDownloadTarget after fail()."""
def register_canceller(cb):
"""The CiphertextDownloader uses this to register a no-argument function
that the target can call to cancel the download. Once this canceller
is invoked, no further calls to write() or close() will be made."""
def finish():
"""When the CiphertextDownloader is done, this finish() function will be
called. Whatever it returns will be returned to the invoker of
Downloader.download.
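As a purely hypothetical illustration, a minimal file-writing target
might look like::

    class FileTarget:
        def open(self, size):
            self.f = open('out.dat', 'wb')
        def write(self, data):
            self.f.write(data)
        def close(self):
            self.f.close()
        def fail(self, why):
            self.f.close()
        def register_canceller(self, cb):
            pass  # this simple target never cancels
        def finish(self):
            return 'out.dat'  # handed back by Downloader.download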
"""
class IDownloader(Interface):
def download(uri, target):
"""Perform a CHK download, sending the data to the given target.
'target' must provide IDownloadTarget.
Returns a Deferred that fires (with the results of target.finish)
when the download is finished, or errbacks if something went wrong."""
class IEncryptedUploadable(Interface):
def set_upload_status(upload_status):
"""Provide an IUploadStatus object that should be filled with status
information. The IEncryptedUploadable is responsible for setting
key-determination progress ('chk'), size, storage_index, and
ciphertext-fetch progress. It may delegate some of this
responsibility to others, in particular to the IUploadable."""
def get_size():
"""This behaves just like IUploadable.get_size()."""
def get_all_encoding_parameters():
"""Return a Deferred that fires with a tuple of
(k,happy,n,segment_size). The segment_size will be used as-is, and
must match the following constraints: it must be a multiple of k, and
it shouldn't be unreasonably larger than the file size (if
segment_size is larger than filesize, the difference must be stored
as padding).
This usually passes through to the IUploadable method of the same
name.
The encoder strictly obeys the values returned by this method. To
make an upload use non-default encoding parameters, you must arrange
to control the values that this method returns.
"""
def get_storage_index():
"""Return a Deferred that fires with a 16-byte storage index.
"""
def read_encrypted(length, hash_only):
"""This behaves just like IUploadable.read(), but returns crypttext
instead of plaintext. If hash_only is True, then this discards the
data (and returns an empty list); this improves efficiency when
resuming an interrupted upload (where we need to compute the
plaintext hashes, but don't need the redundant encrypted data)."""
def close():
"""Just like IUploadable.close()."""
class IUploadable(Interface):
def set_upload_status(upload_status):
"""Provide an IUploadStatus object that should be filled with status
information. The IUploadable is responsible for setting
key-determination progress ('chk')."""
def set_default_encoding_parameters(params):
"""Set the default encoding parameters, which must be a dict mapping
strings to ints. The meaningful keys are 'k', 'happy', 'n', and
'max_segment_size'. These might have an influence on the final
encoding parameters returned by get_all_encoding_parameters(), if the
Uploadable doesn't have more specific preferences.
This call is optional: if it is not used, the Uploadable will use
some built-in defaults. If used, this method must be called before
any other IUploadable methods to have any effect.
"""
def get_size():
"""Return a Deferred that will fire with the length of the data to be
uploaded, in bytes. This will be called before the data is actually
used, to compute encoding parameters.
"""
def get_all_encoding_parameters():
"""Return a Deferred that fires with a tuple of
(k,happy,n,segment_size). The segment_size will be used as-is, and
must match the following constraints: it must be a multiple of k, and
it shouldn't be unreasonably larger than the file size (if
segment_size is larger than filesize, the difference must be stored
as padding).
The relative values of k and n allow some IUploadables to request
better redundancy than others (in exchange for consuming more space
in the grid).
Larger values of segment_size reduce hash overhead, while smaller
values reduce memory footprint and cause data to be delivered in
smaller pieces (which may provide a smoother and more predictable
download experience).
The encoder strictly obeys the values returned by this method. To
make an upload use non-default encoding parameters, you must arrange
to control the values that this method returns. One way to influence
them may be to call set_encoding_parameters() before calling
get_all_encoding_parameters().
"""
def get_encryption_key():
"""Return a Deferred that fires with a 16-byte AES key. This key will
be used to encrypt the data. The key will also be hashed to derive
the StorageIndex.
Uploadables that want to achieve convergence should hash their file
contents and the serialized_encoding_parameters to form the key
(which of course requires a full pass over the data). Uploadables can
use the upload.ConvergentUploadMixin class to achieve this
automatically.
Uploadables that do not care about convergence (or do not wish to
make multiple passes over the data) can simply return a
strongly-random 16 byte string.
get_encryption_key() may be called multiple times: the IUploadable is
required to return the same value each time.
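A hypothetical non-convergent implementation might simply cache a
random key (assuming 'import os' and 'from twisted.internet import
defer')::

    def get_encryption_key(self):
        if getattr(self, '_key', None) is None:
            self._key = os.urandom(16)
        return defer.succeed(self._key)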
"""
def read(length):
"""Return a Deferred that fires with a list of strings (perhaps with
only a single element) that, when concatenated together, contain the
next 'length' bytes of data. If EOF is near, this may provide fewer
than 'length' bytes. The total number of bytes provided by read()
before it signals EOF must equal the size provided by get_size().
If the data must be acquired through multiple internal read
operations, returning a list instead of a single string may help to
reduce string copies. However, the length of the concatenated strings
must equal the amount of data requested, unless EOF is encountered.
Long reads, or short reads without EOF, are not allowed. read()
should return the same amount of data as a local disk file read, just
in a different shape and asynchronously.
'length' will typically be equal to (min(get_size(),1MB)/req_shares),
so a 10kB file means length=3kB, 100kB file means length=30kB,
and >=1MB file means length=300kB.
This method provides for a single full pass through the data. Later
use cases may desire multiple passes or access to only parts of the
data (such as a mutable file making small edits-in-place). This API
will be expanded once those use cases are better understood.
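A hypothetical file-backed sketch (assuming 'from twisted.internet
import defer' and an already-open 'self.filehandle')::

    def read(self, length):
        data = self.filehandle.read(length)  # short only near EOF
        return defer.succeed([data])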
"""
def close():
"""The upload is finished, and whatever filehandle was in use may be
closed."""
class IMutableUploadable(Interface):
"""
I represent content that is due to be uploaded to a mutable filecap.
"""
# This is somewhat simpler than the IUploadable interface above
# because mutable files do not need to be concerned with possibly
# generating a CHK, nor with per-file keys. It is a subset of the
# methods in IUploadable, though, so we could just as well implement
# the mutable uploadables as IUploadables that don't happen to use
# those methods (with the understanding that the unused methods will
# never be called on such objects)
def get_size():
"""
Returns a Deferred that fires with the size of the content held
by the uploadable.
"""
def read(length):
"""
Returns a list of strings that, when concatenated, are the next
length bytes of the file, or fewer if there are fewer bytes
between the current location and the end of the file.
"""
def close():
"""
The process that used the Uploadable is finished using it, so
the uploadable may be closed.
"""
class IUploadResults(Interface):
"""I am returned by immutable upload() methods and contain the results of
the upload.
Note that some of my methods return empty values (0 or an empty dict)
when called for non-distributed LIT files."""
def get_file_size():
"""Return the file size, in bytes."""
def get_uri():
"""Return the (string) URI of the object uploaded, a CHK readcap."""
def get_ciphertext_fetched():
"""Return the number of bytes fetched by the helpe for this upload,
or 0 if the helper did not need to fetch any bytes (or if there was
no helper)."""
def get_preexisting_shares():
"""Return the number of shares that were already present in the grid."""
def get_pushed_shares():
"""Return the number of shares that were uploaded."""
def get_sharemap():
"""Return a dict mapping share identifier to set of IServer
instances. This indicates which servers were given which shares. For
immutable files, the shareid is an integer (the share number, from 0
to N-1). For mutable files, it is a string of the form
'seq%d-%s-sh%d', containing the sequence number, the roothash, and
the share number."""
def get_servermap():
"""Return dict mapping IServer instance to a set of share numbers."""
def get_timings():
"""Return dict of timing information, mapping name to seconds. All
times are floats:
total : total upload time, start to finish
storage_index : time to compute the storage index
peer_selection : time to decide which peers will be used
contacting_helper : initial helper query to upload/no-upload decision
helper_total : initial helper query to helper finished pushing
cumulative_fetch : helper waiting for ciphertext requests
total_fetch : helper start to last ciphertext response
cumulative_encoding : just time spent in zfec
cumulative_sending : just time spent waiting for storage servers
hashes_and_close : last segment push to shareholder close
total_encode_and_push : first encode to shareholder close
"""
def get_uri_extension_data():
"""Return the dict of UEB data created for this file."""
def get_verifycapstr():
"""Return the (string) verify-cap URI for the uploaded object."""
class IDownloadResults(Interface):
"""I am created internally by download() methods. I contain a number of
public attributes that contain details about the download process.::
.file_size : the size of the file, in bytes
.servers_used : set of server peerids that were used during download
.server_problems : dict mapping server peerid to a problem string. Only
servers that had problems (bad hashes, disconnects)
are listed here.
.servermap : dict mapping server peerid to a set of share numbers. Only
servers that had any shares are listed here.
.timings : dict of timing information, mapping name to seconds (float)
peer_selection : time to ask servers about shares
servers_peer_selection : dict of peerid to DYHB-query time
uri_extension : time to fetch a copy of the URI extension block
hashtrees : time to fetch the hash trees
segments : time to fetch, decode, and deliver segments
cumulative_fetch : time spent waiting for storage servers
cumulative_decode : just time spent in zfec
cumulative_decrypt : just time spent in decryption
total : total download time, start to finish
fetch_per_server : dict of server to list of per-segment fetch times
"""
class IUploader(Interface):
def upload(uploadable):
"""Upload the file. 'uploadable' must impement IUploadable. This
returns a Deferred that fires with an IUploadResults instance, from
which the URI of the file can be obtained as results.uri ."""
class ICheckable(Interface):
def check(monitor, verify=False, add_lease=False):
"""Check up on my health, optionally repairing any problems.
This returns a Deferred that fires with an instance that provides
ICheckResults, or None if the object is non-distributed (i.e. LIT
files).
The monitor will be checked periodically to see if the operation has
been cancelled. If so, no new queries will be sent, and the Deferred
will fire (with an OperationCancelledError) immediately.
Filenodes and dirnodes (which provide IFilesystemNode) are also
checkable. Instances that represent verifier-caps will be checkable
but not downloadable. Some objects (like LIT files) do not actually
live in the grid, and their checkers return None (non-distributed
files are always healthy).
If verify=False, a relatively lightweight check will be performed: I
will ask all servers if they have a share for me, and I will believe
whatever they say. If there are at least N distinct shares on the
grid, my results will indicate r.is_healthy()==True. This requires a
roundtrip to each server, but does not transfer very much data, so
the network bandwidth is fairly low.
If verify=True, a more resource-intensive check will be performed:
every share will be downloaded, and the hashes will be validated on
every bit. I will ignore any shares that failed their hash checks. If
there are at least N distinct valid shares on the grid, my results
will indicate r.is_healthy()==True. This requires N/k times as much
download bandwidth (and server disk IO) as a regular download. If a
storage server is holding a corrupt share, or is experiencing memory
failures during retrieval, or is malicious or buggy, then
verification will detect the problem, but checking will not.
If add_lease=True, I will ensure that an up-to-date lease is present
on each share. The lease secrets will be derived from my node secret
(in BASEDIR/private/secret), so either I will add a new lease to the
share, or I will merely renew the lease that I already had. In a
future version of the storage-server protocol (once Accounting has
been implemented), there may be additional options here to define the
kind of lease that is obtained (which account number to claim, etc).
TODO: any problems seen during checking will be reported to the
health-manager.furl, a centralized object that is responsible for
figuring out why files are unhealthy so corrective action can be
taken.
"""
def check_and_repair(monitor, verify=False, add_lease=False):
"""Like check(), but if the file/directory is not healthy, attempt to
repair the damage.
Any non-healthy result will cause an immediate repair operation, to
generate and upload new shares. After repair, the file will be as
healthy as we can make it. Details about what sort of repair is done
will be put in the check-and-repair results. The Deferred will not
fire until the repair is complete.
This returns a Deferred that fires with an instance of
ICheckAndRepairResults."""
class IDeepCheckable(Interface):
def start_deep_check(verify=False, add_lease=False):
"""Check upon the health of me and everything I can reach.
This is a recursive form of check(), usable only on dirnodes.
I return a Monitor, with results that are an IDeepCheckResults
object.
TODO: If any of the directories I traverse are unrecoverable, the
Monitor will report failure. If any of the files I check upon are
unrecoverable, those problems will be reported in the
IDeepCheckResults as usual, and the Monitor will not report a
failure.
"""
def start_deep_check_and_repair(verify=False, add_lease=False):
"""Check upon the health of me and everything I can reach. Repair
anything that isn't healthy.
This is a recursive form of check_and_repair(), usable only on
dirnodes.
I return a Monitor, with results that are an
IDeepCheckAndRepairResults object.
TODO: If any of the directories I traverse are unrecoverable, the
Monitor will report failure. If any of the files I check upon are
unrecoverable, those problems will be reported in the
IDeepCheckResults as usual, and the Monitor will not report a
failure.
"""
class ICheckResults(Interface):
"""I contain the detailed results of a check/verify operation.
"""
def get_storage_index():
"""Return a string with the (binary) storage index."""
def get_storage_index_string():
"""Return a string with the (printable) abbreviated storage index."""
def get_uri():
"""Return the (string) URI of the object that was checked."""
def is_healthy():
"""Return a boolean, True if the file/dir is fully healthy, False if
it is damaged in any way. Non-distributed LIT files always return
True."""
def is_recoverable():
"""Return a boolean, True if the file/dir can be recovered, False if
not. Unrecoverable files are obviously unhealthy. Non-distributed LIT
files always return True."""
def needs_rebalancing():
"""Return a boolean, True if the file/dir's reliability could be
improved by moving shares to new servers. Non-distributed LIT files
always return False."""
# the following methods all return None for non-distributed LIT files
def get_encoding_needed():
"""Return 'k', the number of shares required for recovery"""
def get_encoding_expected():
"""Return 'N', the number of total shares generated"""
def get_share_counter_good():
"""Return the number of distinct good shares that were found. For
mutable files, this counts shares for the 'best' version."""
def get_share_counter_wrong():
"""For mutable files, return the number of shares for versions other
than the 'best' one (which is defined as being the recoverable
version with the highest sequence number, then the highest roothash).
These are either leftover shares from an older version (perhaps on a
server that was offline when an update occurred), shares from an
unrecoverable newer version, or shares from an alternate current
version that results from an uncoordinated write collision. For a
healthy file, this will equal 0. For immutable files, this will
always equal 0."""
def get_corrupt_shares():
"""Return a list of 'share locators', one for each share that was
found to be corrupt (integrity failure). Each share locator is a list
of (IServer, storage_index, sharenum)."""
def get_incompatible_shares():
"""Return a list of 'share locators', one for each share that was
found to be of an unknown format. Each share locator is a list of
(IServer, storage_index, sharenum)."""
def get_servers_responding():
"""Return a list of IServer objects, one for each server that
responded to the share query (even if they said they didn't have
shares, and even if they said they did have shares but then didn't
send them when asked, or dropped the connection, or returned a
Failure, and even if they said they did have shares and sent
incorrect ones when asked)"""
def get_host_counter_good_shares():
"""Return the number of distinct storage servers with good shares. If
this number is less than get_share_counters()[good], then some shares
are doubled up, increasing the correlation of failures. This
indicates that one or more shares should be moved to an otherwise
unused server, if one is available.
"""
def get_version_counter_recoverable():
"""Return the number of recoverable versions of the file. For a
healthy file, this will equal 1."""
def get_version_counter_unrecoverable():
"""Return the number of unrecoverable versions of the file. For a
healthy file, this will be 0."""
def get_sharemap():
"""Return a dict mapping share identifier to list of IServer objects.
This indicates which servers are holding which shares. For immutable
files, the shareid is an integer (the share number, from 0 to N-1).
For mutable files, it is a string of the form 'seq%d-%s-sh%d',
containing the sequence number, the roothash, and the share number."""
def get_summary():
"""Return a string with a brief (one-line) summary of the results."""
def get_report():
"""Return a list of strings with more detailed results."""
class ICheckAndRepairResults(Interface):
"""I contain the detailed results of a check/verify/repair operation.
The IFilesystemNode.check()/verify()/repair() methods all return
instances that provide ICheckAndRepairResults.
"""
def get_storage_index():
"""Return a string with the (binary) storage index."""
def get_storage_index_string():
"""Return a string with the (printable) abbreviated storage index."""
def get_repair_attempted():
"""Return a boolean, True if a repair was attempted. We might not
attempt to repair the file because it was healthy, or healthy enough
(i.e. some shares were missing but not enough to exceed some
threshold), or because we don't know how to repair this object."""
def get_repair_successful():
"""Return a boolean, True if repair was attempted and the file/dir
was fully healthy afterwards. False if no repair was attempted or if
a repair attempt failed."""
def get_pre_repair_results():
"""Return an ICheckResults instance that describes the state of the
file/dir before any repair was attempted."""
def get_post_repair_results():
"""Return an ICheckResults instance that describes the state of the
file/dir after any repair was attempted. If no repair was attempted,
the pre-repair and post-repair results will be identical."""
class IDeepCheckResults(Interface):
"""I contain the results of a deep-check operation.
This is returned by a call to ICheckable.deep_check().
"""
def get_root_storage_index_string():
"""Return the storage index (abbreviated human-readable string) of
the first object checked."""
def get_counters():
"""Return a dictionary with the following keys::
count-objects-checked: count of how many objects were checked
count-objects-healthy: how many of those objects were completely
healthy
count-objects-unhealthy: how many were damaged in some way
count-objects-unrecoverable: how many were unrecoverable
count-corrupt-shares: how many shares were found to have
corruption, summed over all objects
examined
"""
def get_corrupt_shares():
"""Return a set of (IServer, storage_index, sharenum) for all shares
that were found to be corrupt. storage_index is binary."""
def get_all_results():
"""Return a dictionary mapping pathname (a tuple of strings, ready to
be slash-joined) to an ICheckResults instance, one for each object
that was checked."""
def get_results_for_storage_index(storage_index):
"""Retrive the ICheckResults instance for the given (binary)
storage index. Raises KeyError if there are no results for that
storage index."""
def get_stats():
"""Return a dictionary with the same keys as
IDirectoryNode.deep_stats()."""
class IDeepCheckAndRepairResults(Interface):
"""I contain the results of a deep-check-and-repair operation.
This is returned by a call to ICheckable.deep_check_and_repair().
"""
def get_root_storage_index_string():
"""Return the storage index (abbreviated human-readable string) of
the first object checked."""
def get_counters():
"""Return a dictionary with the following keys::
count-objects-checked: count of how many objects were checked
count-objects-healthy-pre-repair: how many of those objects were
completely healthy (before any
repair)
count-objects-unhealthy-pre-repair: how many were damaged in
some way
count-objects-unrecoverable-pre-repair: how many were unrecoverable
count-objects-healthy-post-repair: how many of those objects were
completely healthy (after any
repair)
count-objects-unhealthy-post-repair: how many were damaged in
some way
count-objects-unrecoverable-post-repair: how many were
unrecoverable
count-repairs-attempted: repairs were attempted on this many
objects. The count-repairs- keys will
always be provided, however unless
repair=true is present, they will all
be zero.
count-repairs-successful: how many repairs resulted in healthy
objects
count-repairs-unsuccessful: how many repairs did not result in
completely healthy objects
count-corrupt-shares-pre-repair: how many shares were found to
have corruption, summed over all
objects examined (before any
repair)
count-corrupt-shares-post-repair: how many shares were found to
have corruption, summed over all
objects examined (after any
repair)
"""
def get_stats():
"""Return a dictionary with the same keys as
IDirectoryNode.deep_stats()."""
def get_corrupt_shares():
"""Return a set of (IServer, storage_index, sharenum) for all shares
that were found to be corrupt before any repair was attempted.
storage_index is binary.
"""
def get_remaining_corrupt_shares():
"""Return a set of (IServer, storage_index, sharenum) for all shares
that were found to be corrupt after any repair was completed.
storage_index is binary. These are shares that need manual inspection
and probably deletion.
"""
def get_all_results():
"""Return a dictionary mapping pathname (a tuple of strings, ready to
be slash-joined) to an ICheckAndRepairResults instance, one for each
object that was checked."""
def get_results_for_storage_index(storage_index):
"""Retrive the ICheckAndRepairResults instance for the given (binary)
storage index. Raises KeyError if there are no results for that
storage index."""
class IRepairable(Interface):
def repair(check_results):
"""Attempt to repair the given object. Returns a Deferred that fires
with a IRepairResults object.
I must be called with an object that implements ICheckResults, as
proof that you have actually discovered a problem with this file. I
will use the data in the checker results to guide the repair process,
such as which servers provided bad data and should therefore be
avoided. The ICheckResults object is inside the
ICheckAndRepairResults object, which is returned by the
ICheckable.check() method::
d = filenode.check(repair=False)
def _got_results(check_and_repair_results):
check_results = check_and_repair_results.get_pre_repair_results()
return filenode.repair(check_results)
d.addCallback(_got_results)
return d
"""
class IRepairResults(Interface):
"""I contain the results of a repair operation."""
def get_successful():
"""Returns a boolean: True if the repair made the file healthy, False
if not. Repair failure generally indicates a file that has been
damaged beyond repair."""
class IClient(Interface):
def upload(uploadable):
"""Upload some data into a CHK, get back the UploadResults for it.
@param uploadable: something that implements IUploadable
@return: a Deferred that fires with the UploadResults instance.
To get the URI for this file, use results.uri .
"""
def create_mutable_file(contents=""):
"""Create a new mutable file (with initial) contents, get back the
new node instance.
@param contents: (bytestring, callable, or None): this provides the
initial contents of the mutable file. If 'contents' is a bytestring,
it will be used as-is. If 'contents' is a callable, it will be
invoked with the new MutableFileNode instance and is expected to
return a bytestring with the initial contents of the file (the
callable can use node.get_writekey() to decide how to encrypt the
initial contents, e.g. for a brand new dirnode with initial
children). contents=None is equivalent to an empty string. Using
content_maker= is more efficient than creating a mutable file and
setting its contents in two separate operations.
@return: a Deferred that fires with an IMutableFileNode instance.
"""
def create_dirnode(initial_children={}):
"""Create a new unattached dirnode, possibly with initial children.
@param initial_children: dict with keys that are unicode child names,
and values that are (childnode, metadata) tuples.
@return: a Deferred that fires with the new IDirectoryNode instance.
"""
def create_node_from_uri(uri, rouri):
"""Create a new IFilesystemNode instance from the uri, synchronously.
@param uri: a string or IURI-providing instance, or None. This could
be for a LiteralFileNode, a CHK file node, a mutable file
node, or a directory node
@param rouri: a string or IURI-providing instance, or None. If the
main uri is None, I will use the rouri instead. If I
recognize the format of the main uri, I will ignore the
rouri (because it can be derived from the writecap).
@return: an instance that provides IFilesystemNode (or more usefully
one of its subclasses). File-specifying URIs will result in
IFileNode-providing instances, like ImmutableFileNode,
LiteralFileNode, or MutableFileNode. Directory-specifying
URIs will result in IDirectoryNode-providing instances, like
DirectoryNode.
"""
class INodeMaker(Interface):
"""The NodeMaker is used to create IFilesystemNode instances. It can
accept a filecap/dircap string and return the node right away. It can
also create new nodes (i.e. upload a file, or create a mutable file)
asynchronously. Once you have one of these nodes, you can use other
methods to determine whether it is a file or directory, and to download
or modify its contents.
The NodeMaker encapsulates all the authorities that these
IFilesystemNodes require (like references to the StorageFarmBroker). Each
Tahoe process will typically have a single NodeMaker, but unit tests may
create simplified/mocked forms for testing purposes.
"""
def create_from_cap(writecap, readcap=None, deep_immutable=False, name=u"<unknown name>"):
"""I create an IFilesystemNode from the given writecap/readcap. I can
only provide nodes for existing file/directory objects: use my other
methods to create new objects. I return synchronously."""
def create_mutable_file(contents=None, keysize=None):
"""I create a new mutable file, and return a Deferred that will fire
with the IMutableFileNode instance when it is ready. If contents= is
provided (a bytestring), it will be used as the initial contents of
the new file, otherwise the file will contain zero bytes. keysize= is
for use by unit tests, to create mutable files that are smaller than
usual."""
def create_new_mutable_directory(initial_children={}):
"""I create a new mutable directory, and return a Deferred that will
fire with the IDirectoryNode instance when it is ready. If
initial_children= is provided (a dict mapping unicode child name to
(childnode, metadata_dict) tuples), the directory will be populated
with those children, otherwise it will be empty."""
class IClientStatus(Interface):
def list_all_uploads():
"""Return a list of uploader objects, one for each upload that
currently has an object available (tracked with weakrefs). This is
intended for debugging purposes."""
def list_active_uploads():
"""Return a list of active IUploadStatus objects."""
def list_recent_uploads():
"""Return a list of IUploadStatus objects for the most recently
started uploads."""
def list_all_downloads():
"""Return a list of downloader objects, one for each download that
currently has an object available (tracked with weakrefs). This is
intended for debugging purposes."""
def list_active_downloads():
"""Return a list of active IDownloadStatus objects."""
def list_recent_downloads():
"""Return a list of IDownloadStatus objects for the most recently
started downloads."""
class IUploadStatus(Interface):
def get_started():
"""Return a timestamp (float with seconds since epoch) indicating
when the operation was started."""
def get_storage_index():
"""Return a string with the (binary) storage index in use on this
upload. Returns None if the storage index has not yet been
calculated."""
def get_size():
"""Return an integer with the number of bytes that will eventually
be uploaded for this file. Returns None if the size is not yet known.
"""
def using_helper():
"""Return True if this upload is using a Helper, False if not."""
def get_status():
"""Return a string describing the current state of the upload
process."""
def get_progress():
"""Returns a tuple of floats, (chk, ciphertext, encode_and_push),
each from 0.0 to 1.0 . 'chk' describes how much progress has been
made towards hashing the file to determine a CHK encryption key: if
non-convergent encryption is in use, this will be trivial, otherwise
the whole file must be hashed. 'ciphertext' describes how much of the
ciphertext has been pushed to the helper, and is '1.0' for non-helper
uploads. 'encode_and_push' describes how much of the encode-and-push
process has finished: for helper uploads this is dependent upon the
helper providing progress reports. It might be reasonable to add all
three numbers and report the sum to the user."""
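# A hypothetical way to fold the three progress numbers into a single
# overall fraction (not part of this interface):
#   chk, ciphertext, push = upload_status.get_progress()
#   overall = (chk + ciphertext + push) / 3.0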
def get_active():
"""Return True if the upload is currently active, False if not."""
def get_results():
"""Return an instance of UploadResults (which contains timing and
sharemap information). Might return None if the upload is not yet
finished."""
def get_counter():
"""Each upload status gets a unique number: this method returns that
number. This provides a handle to this particular upload, so a web
page can generate a suitable hyperlink."""
class IDownloadStatus(Interface):
def get_started():
"""Return a timestamp (float with seconds since epoch) indicating
when the operation was started."""
def get_storage_index():
"""Return a string with the (binary) storage index in use on this
download. This may be None if there is no storage index (i.e. LIT
files)."""
def get_size():
"""Return an integer with the number of bytes that will eventually be
retrieved for this file. Returns None if the size is not yet known.
"""
def using_helper():
"""Return True if this download is using a Helper, False if not."""
def get_status():
"""Return a string describing the current state of the download
process."""
def get_progress():
"""Returns a float (from 0.0 to 1.0) describing the amount of the
download that has completed. This value will remain at 0.0 until the
first byte of plaintext is pushed to the download target."""
def get_active():
"""Return True if the download is currently active, False if not."""
def get_counter():
"""Each download status gets a unique number: this method returns
that number. This provides a handle to this particular download, so a
web page can generate a suitable hyperlink."""
class IServermapUpdaterStatus(Interface):
pass
class IPublishStatus(Interface):
pass
class IRetrieveStatus(Interface):
pass
class NotCapableError(Exception):
"""You have tried to write to a read-only node."""
class BadWriteEnablerError(Exception):
pass
class RIControlClient(RemoteInterface):
def wait_for_client_connections(num_clients=int):
"""Do not return until we have connections to at least NUM_CLIENTS
storage servers.
"""
def upload_from_file_to_uri(filename=str,
convergence=ChoiceOf(None,
StringConstraint(2**20))):
"""Upload a file to the grid. This accepts a filename (which must be
absolute) that points to a file on the node's local disk. The node will
read the contents of this file, upload it to the grid, then return the
URI at which it was uploaded. If convergence is None then a random
encryption key will be used, else the plaintext will be hashed, then
that hash will be mixed together with the "convergence" string to form
the encryption key.
"""
return URI
def download_from_uri_to_file(uri=URI, filename=str):
"""Download a file from the grid, placing it on the node's local disk
at the given filename (which must be absolute[?]). Returns the
absolute filename where the file was written."""
return str
# debug stuff
def get_memory_usage():
"""Return a dict describes the amount of memory currently in use. The
keys are 'VmPeak', 'VmSize', and 'VmData'. The values are integers,
measuring memory consupmtion in bytes."""
return DictOf(str, int)
def speed_test(count=int, size=int, mutable=Any()):
"""Write 'count' tempfiles to disk, all of the given size. Measure
how long (in seconds) it takes to upload them all to the servers.
Then measure how long it takes to download all of them. If 'mutable'
is 'create', time creation of mutable files. If 'mutable' is
'upload', then time access to the same mutable file instead of
creating one.
Returns a tuple of (upload_time, download_time).
"""
return (float, float)
def measure_peer_response_time():
"""Send a short message to each connected peer, and measure the time
it takes for them to respond to it. This is a rough measure of the
application-level round trip time.
@return: a dictionary mapping peerid to a float (RTT time in seconds)
"""
return DictOf(str, float)
UploadResults = Any() #DictOf(str, str)
class RIEncryptedUploadable(RemoteInterface):
__remote_name__ = "RIEncryptedUploadable.tahoe.allmydata.com"
def get_size():
return Offset
def get_all_encoding_parameters():
return (int, int, int, long)
def read_encrypted(offset=Offset, length=ReadSize):
return ListOf(str)
def close():
return None
class RICHKUploadHelper(RemoteInterface):
__remote_name__ = "RIUploadHelper.tahoe.allmydata.com"
def get_version():
"""
Return a dictionary of version information.
"""
return DictOf(str, Any())
def upload(reader=RIEncryptedUploadable):
return UploadResults
class RIHelper(RemoteInterface):
__remote_name__ = "RIHelper.tahoe.allmydata.com"
def get_version():
"""
Return a dictionary of version information.
"""
return DictOf(str, Any())
def upload_chk(si=StorageIndex):
"""See if a file with a given storage index needs uploading. The
helper will ask the appropriate storage servers to see if the file
has already been uploaded. If so, the helper will return a set of
'upload results' that includes whatever hashes are needed to build
the read-cap, and perhaps a truncated sharemap.
If the file has not yet been uploaded (or if it was only partially
uploaded), the helper will return an empty upload-results dictionary
and also an RICHKUploadHelper object that will take care of the
upload process. The client should call upload() on this object and
pass it a reference to an RIEncryptedUploadable object that will
provide ciphertext. When the upload is finished, the upload() method
will finish and return the upload results.
"""
return (UploadResults, ChoiceOf(RICHKUploadHelper, None))
class RIStatsProvider(RemoteInterface):
__remote_name__ = "RIStatsProvider.tahoe.allmydata.com"
"""
Provides access to statistics and monitoring information.
"""
def get_stats():
"""
returns a dictionary containing 'counters' and 'stats', each a
dictionary with string counter/stat name keys, and numeric or None values.
counters are monotonically increasing measures of work done, and
stats are instantaneous measures (potentially time averaged
internally)
"""
return DictOf(str, DictOf(str, ChoiceOf(float, int, long, None)))
class RIStatsGatherer(RemoteInterface):
__remote_name__ = "RIStatsGatherer.tahoe.allmydata.com"
"""
Provides a monitoring service for centralised collection of stats
"""
def provide(provider=RIStatsProvider, nickname=str):
"""
@param provider: a stats collector instance that should be polled
periodically by the gatherer to collect stats.
@param nickname: a name useful to identify the provided client
"""
return None
class IStatsProducer(Interface):
def get_stats():
"""
returns a dictionary, with str keys representing the names of stats
to be monitored, and numeric values.
"""
class RIKeyGenerator(RemoteInterface):
__remote_name__ = "RIKeyGenerator.tahoe.allmydata.com"
"""
Provides a service offering to make RSA key pairs.
"""
def get_rsa_key_pair(key_size=int):
"""
@param key_size: the size of the signature key.
@return: tuple(verifying_key, signing_key)
"""
return TupleOf(str, str)
class FileTooLargeError(Exception):
pass
class IValidatedThingProxy(Interface):
def start():
""" Acquire a thing and validate it. Return a deferred that is
eventually fired with self if the thing is valid or errbacked if it
can't be acquired or validated."""
class InsufficientVersionError(Exception):
def __init__(self, needed, got):
self.needed = needed
self.got = got
def __repr__(self):
return "InsufficientVersionError(need '%s', got %s)" % (self.needed,
self.got)
class EmptyPathnameComponentError(Exception):
"""The webapi disallows empty pathname components."""
|
kytvi2p/tahoe-lafs
|
src/allmydata/interfaces.py
|
Python
|
gpl-2.0
| 125,772
|
[
"Brian",
"VisIt"
] |
31f49afdf1ca6b247071be9b9a6957671bb8134c14a02624a127c3aff468c004
|
# PyVision License
#
# Copyright (c) 2006-2008 David S. Bolme
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither name of copyright holders nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# TODO: document module.
import unittest
import os.path
from numpy import array,ones,zeros,nonzero,ndarray
from scipy.ndimage import convolve
from scipy.ndimage import gaussian_filter,maximum_filter
# TODO: At some point it would be nice to have the options of prewitt or sobel as filters.
# from scipy.ndimage import prewitt,sobel
from numpy.linalg import det
import pyvision
from pyvision.point.DetectorROI import DetectorROI
from pyvision.types.img import Image
from pyvision.types.Point import Point
conv2 = convolve
class DetectorCorner(DetectorROI):
def __init__(self,filter = [[-1,0,1]], radius=9, sigma=0.7, k=0.04, **kwargs):
'''
Corner Detector
filter - first derivative filter
radius - radius of the max filter
sigma - sigma of the smoothing gaussian.
k - sensitivity constant in the corner response, R = det(M) - k*trace(M)**2.
Passed to superclass:
n - is the approximate number of points requested.
bin_size - the width and height of each bin in pixels.
corner_selector ('all', 'bins', or 'best') - strategy for point selection.
When corner_selector is set to bins, the image is subdivided in to bins of
size <bin_size>X<bin_size> pixels and an equal number of points will be taken
from each of those bins. This ensures that points are found in all parts of the
image, not just where the corners are strongest.
This code is based on a function originally written for matlab.
Original matlab code by:
Jingyu Yan and Marc Pollefeys
Department of Computer Science
University of North Carolina at Chapel Hill
Converted to Python by:
David Bolme
Department of Computer Science
Colorado State University
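Hypothetical usage sketch (image path assumed):

    detector = DetectorCorner()
    im = Image('some_image.jpg')
    points = detector.detect(im)  # list of (score, point, radius)
    for score, pt, radius in points:
        im.annotatePoint(pt)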
'''
DetectorROI.__init__(self,**kwargs)
self.filter = filter
self.radius = radius
self.sigma = sigma
self.k = k
def _detect(self,image):
# Assumes a two-dimensional array
A = None
if isinstance(image,Image):
A = image.asMatrix2D()
elif isinstance(image,ndarray) and len(image.shape)==2:
A = image
else:
raise TypeError("ERROR Unknown Type (%s) - Only arrays and pyvision images supported."%type(image))
filter = array(self.filter)
assert len(filter.shape) == 2
#feature window calculation
del_A_1 = conv2(A,filter)
del_A_2 = conv2(A,filter.transpose())
del_A_1_1 = del_A_1 * del_A_1
matrix_1_1 = gaussian_filter(del_A_1_1, self.sigma)
del del_A_1_1
del_A_2_2 = del_A_2 * del_A_2
matrix_2_2 = gaussian_filter(del_A_2_2, self.sigma)
del del_A_2_2
del_A_1_2 = del_A_1 * del_A_2
matrix_1_2 = gaussian_filter(del_A_1_2, self.sigma)
del del_A_1_2
del del_A_1,del_A_2
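# matrix_1_1, matrix_1_2 and matrix_2_2 are the entries of the smoothed
# structure tensor M; the corner response below is the Harris measure
# R = det(M) - k * trace(M)**2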
dM = matrix_1_1*matrix_2_2 - matrix_1_2*matrix_1_2
tM = matrix_1_1+matrix_2_2
del matrix_1_1 , matrix_1_2, matrix_2_2
R = dM-self.k*pow(tM,2)
footprint = ones((self.radius,self.radius))
mx = maximum_filter(R, footprint = footprint)
local_maxima = (R == mx) * (R != zeros(R.shape)) # make sure to remove completely dark points
del mx
points = nonzero(local_maxima)
del local_maxima
points = array([points[0],points[1]]).transpose()
L = []
for each in points:
L.append((R[each[0],each[1]],each[0],each[1],None))
del R
return L
class _CornerTest(unittest.TestCase):
def setUp(self):
self.SHOW_IMAGES = False
def testDetectorCorner1(self):
detector = DetectorCorner()
filename = os.path.join(pyvision.__path__[0],'data','nonface','NONFACE_1.jpg')
im = Image(filename,bw_annotate=True)
points = detector.detect(im)
for score,pt,radius in points:
im.annotatePoint(pt)
if self.SHOW_IMAGES: im.show()
self.assertEquals(len(points),390)
def testDetectorCorner2(self):
detector = DetectorCorner()
filename = os.path.join(pyvision.__path__[0],'data','nonface','NONFACE_19.jpg')
im = Image(filename,bw_annotate=True)
points = detector.detect(im)
for score,pt,radius in points:
im.annotatePoint(pt)
if self.SHOW_IMAGES: im.show()
self.assertEquals(len(points),390)
def testDetectorCorner3(self):
detector = DetectorCorner()
filename = os.path.join(pyvision.__path__[0],'data','nonface','NONFACE_22.jpg')
im = Image(filename,bw_annotate=True)
points = detector.detect(im)
for score,pt,radius in points:
im.annotatePoint(pt)
if self.SHOW_IMAGES: im.show()
self.assertEquals(len(points),390)
def testDetectorCorner4(self):
detector = DetectorCorner()
filename = os.path.join(pyvision.__path__[0],'data','nonface','NONFACE_37.jpg')
im = Image(filename,bw_annotate=True)
points = detector.detect(im)
for score,pt,radius in points:
im.annotatePoint(pt)
if self.SHOW_IMAGES: im.show()
self.assertEquals(len(points),351)
def testDetectorCorner5(self):
detector = DetectorCorner(selector='best')
filename = os.path.join(pyvision.__path__[0],'data','nonface','NONFACE_37.jpg')
im = Image(filename,bw_annotate=True)
points = detector.detect(im)
for score,pt,radius in points:
im.annotatePoint(pt)
if self.SHOW_IMAGES: im.show()
self.assertEquals(len(points),250)
def testDetectorCorner6(self):
detector = DetectorCorner(selector='all')
filename = os.path.join(pyvision.__path__[0],'data','nonface','NONFACE_37.jpg')
im = Image(filename,bw_annotate=True)
points = detector.detect(im)
for score,pt,radius in points:
im.annotatePoint(pt)
if self.SHOW_IMAGES: im.show()
self.assertEquals(len(points),2149)
|
Dfred/concept-robot
|
HRI/vision/pyvision_0.9.0/src/pyvision/point/DetectorCorner.py
|
Python
|
gpl-3.0
| 7,990
|
[
"Gaussian"
] |
86be5ad11a5e67df9721681c92f045a7458c977b75f55350c43c46226159413c
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from os.path import join, basename
from os import environ
from qp_shogun.utils import (
_format_params, make_read_pairs_per_sample,
_run_commands, _per_sample_ainfo)
DIR = environ["QC_SORTMERNA_DB_DP"]
RNA_REF_DB = (
'{0}silva-arc-23s-id98.fasta,'
'{0}silva-arc-23s-id98.idx:'
'{0}silva-bac-16s-id90.fasta,'
'{0}silva-bac-16s-id90.idx:'
'{0}silva-bac-23s-id98.fasta,'
'{0}silva-bac-23s-id98.idx:'
'{0}silva-arc-16s-id95.fasta,'
'{0}silva-arc-16s-id95.idx:'
'{0}silva-euk-18s-id95.fasta,'
'{0}silva-euk-18s-id95.idx:'
'{0}silva-euk-28s-id98.fasta,'
'{0}silva-euk-28s-id98.idx:'
'{0}rfam-5s-database-id98.fasta,'
'{0}rfam-5s-database-id98.idx:'
'{0}rfam-5.8s-database-id98.fasta,'
'{0}rfam-5.8s-database-id98.idx'
).format(DIR)
SORTMERNA_PARAMS = {
'blast': 'Output blast format',
'num_alignments': 'Number of alignments',
'a': 'Number of threads',
'm': 'Memory'}
def generate_sortmerna_commands(forward_seqs, reverse_seqs, map_file,
out_dir, parameters):
"""Generates the Sortmerna commands
Parameters
----------
forward_seqs : list of str
The list of forward seqs filepaths
reverse_seqs : list of str
The list of reverse seqs filepaths
map_file : str
The path to the mapping file
out_dir : str
The job output directory
parameters : dict
The command's parameters, keyed by parameter name
Returns
-------
cmds: list of str
The Sortmerna commands
samples: list of tup
list of 4-tuples with run prefix, sample name, fwd read fp, rev read fp
Notes
-----
Currently this is requiring matched pairs in the make_read_pairs_per_sample
step but implicitly allowing empty reverse reads in the actual command
generation. This behavior may allow support of situations with empty
reverse reads in some samples, for example after trimming and QC.
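Examples
--------
A hypothetical invocation (file paths and parameter values assumed)::

    cmds, samples = generate_sortmerna_commands(
        ['S1_R1.fastq.gz'], ['S1_R2.fastq.gz'], 'qiime_map.txt',
        '/tmp/job_dir',
        {'Number of threads': 4, 'Memory': '8',
         'Number of alignments': '1', 'Output blast format': '1'})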
"""
# matching filenames, samples, and run prefixes
samples = make_read_pairs_per_sample(forward_seqs, reverse_seqs, map_file)
cmds = []
param_string = _format_params(parameters, SORTMERNA_PARAMS)
threads = parameters['Number of threads']
# Sortmerna 2.1 does not support direct processing of
# compressed files currently
# note SMR auto-detects file type and adds .fastq extension
# to the generated output files
template = ("unpigz -p {thrds} -c {ip} > {ip_unpigz} && "
"sortmerna --ref {ref_db} --reads {ip_unpigz} "
"--aligned {smr_r_op} --other {smr_nr_op} "
"--fastx {params} && "
"pigz -p {thrds} -c {smr_r_op}.fastq > {smr_r_op_gz} && "
"pigz -p {thrds} -c {smr_nr_op}.fastq > {smr_nr_op_gz};"
)
arguments = {'thrds': threads,
'ref_db': RNA_REF_DB, 'params': param_string}
for run_prefix, sample, f_fp, r_fp in samples:
prefix_path = join(out_dir, run_prefix)
for index, fp in enumerate([f_fp, r_fp]):
# if reverse filepath is not present ignore it
if fp is None:
continue
arguments['ip'] = fp
arguments['ip_unpigz'] = join(
out_dir, basename(fp.replace('.fastq.gz', '.fastq')))
arguments['smr_r_op'] = prefix_path + '.ribosomal.R%d'\
% (index + 1)
arguments['smr_nr_op'] = prefix_path + '.nonribosomal.R%d'\
% (index + 1)
arguments['smr_r_op_gz'] = arguments['smr_r_op'] + '.fastq.gz'
arguments['smr_nr_op_gz'] = arguments['smr_nr_op'] + '.fastq.gz'
cmds.append(template.format(**arguments))
return cmds, samples
# In this version I have not added a summary file or sam file
def sortmerna(qclient, job_id, parameters, out_dir):
"""Run Sortmerna with the given parameters
Parameters
----------
qclient : tgp.qiita_client.QiitaClient
The Qiita server client
job_id : str
The job id
parameters : dict
The parameter values
out_dir : str
The path to the job's output directory
Returns
-------
bool, list, str
The results of the job
"""
# Step 1 get the rest of the information need to run Sortmerna
qclient.update_job_step(job_id, "Step 1 of 4: Collecting information")
artifact_id = parameters['input']
del parameters['input']
# Get the artifact filepath information
artifact_info = qclient.get("/qiita_db/artifacts/%s/" % artifact_id)
fps = artifact_info['files']
# Get the artifact metadata
prep_info = qclient.get('/qiita_db/prep_template/%s/'
% artifact_info['prep_information'][0])
qiime_map = prep_info['qiime-map']
# Step 2 generating command for Sortmerna
qclient.update_job_step(job_id, "Step 2 of 4: Generating"
" SortMeRNA commands")
rs = fps['raw_reverse_seqs'] if 'raw_reverse_seqs' in fps else []
commands, samples = generate_sortmerna_commands(
fps['raw_forward_seqs'],
rs, qiime_map, out_dir,
parameters)
# Step 3 executing Sortmerna
len_cmd = len(commands)
msg = "Step 3 of 4: Executing ribosomal filtering (%d/{0})".format(len_cmd)
success, msg = _run_commands(qclient, job_id,
commands, msg, 'QC_Sortmerna')
if not success:
return False, None, msg
ainfo = []
# Generates 2 artifacts: one for the ribosomal
# reads and the other for the non-ribosomal reads
# Step 4 generating artifacts for Nonribosomal reads
msg = ("Step 4 of 5: Generating artifacts "
"for Nonribosomal reads (%d/{0})").format(len_cmd)
suffixes = ['%s.nonribosomal.R1.fastq.gz', '%s.nonribosomal.R2.fastq.gz']
prg_name = 'Sortmerna'
file_type_name = 'Non-ribosomal reads'
ainfo.extend(_per_sample_ainfo(
out_dir, samples, suffixes, prg_name, file_type_name, bool(rs)))
# Step 5 generating artifacts for Ribosomal reads
msg = ("Step 5 of 5: Generating artifacts "
"for Ribosomal reads (%d/{0})").format(len_cmd)
suffixes = ['%s.ribosomal.R1.fastq.gz', '%s.ribosomal.R2.fastq.gz']
prg_name = 'Sortmerna'
file_type_name = 'Ribosomal reads'
ainfo.extend(_per_sample_ainfo(
out_dir, samples, suffixes, prg_name, file_type_name, bool(rs)))
return True, ainfo, ""
|
antgonza/qp-shotgun
|
qp_shogun/sortmerna/sortmerna.py
|
Python
|
bsd-3-clause
| 6,980
|
[
"BLAST"
] |
ff7715f5f2fd16de97d8cb62a9c65c51f4246b0e4eaf1236a4a6b69a6981bc98
|